Dataset columns:
  hip_filename: string (length 5 to 84)
  hip_content: string (length 79 to 9.69M)
  cuda_filename: string (length 4 to 83)
  cuda_content: string (length 19 to 9.69M)
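Each row pairs a hipify-generated HIP source file (hip_filename, hip_content) with the CUDA file it was produced from (cuda_filename, cuda_content). As a minimal sketch of what that pairing looks like, the wrapper below uses the launch pattern seen throughout the HIP content that follows; the CUDA form is the usual pre-hipify spelling, shown for comparison rather than quoted from this row's cuda_content.

// HIP side, as emitted by hipify (the launch style used throughout hip_content):
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  hipLaunchKernelGGL((_set_const), dim3(Gr), dim3(Bl), 0, 0, mat, value, d);
}

// CUDA side, the equivalent triple-chevron launch (illustrative):
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  _set_const<<<Gr, Bl>>>(mat, value, d);
}

// In hipLaunchKernelGGL the two extra 0 arguments are the dynamic shared-memory
// size in bytes and the stream, both of which the <<<...>>> launch leaves implicit.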
c1050c7ce687e61e8e2db52f5513e5835b3dfcf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // gpucompute/cuda-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013 Johns Hopkins University (author: Guoguo Chen) // 2015 Yajie Miao // 2017 Jayadev Billa (added LSTM pointwise ops kernel) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cuda-kernels.h" #include "cuPrintf_hip.cuh" #include "cuPrintf.hip" #include "ctc-utils.h" #include "stdio.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + 
j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]+FLT_EPSILON); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } /* * CuVector */ template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. 
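// Each of the CU1DBLOCK threads scans its own contiguous chunk of
// ceil(dim / CU1DBLOCK) elements in the loop below, writes its partial maximum
// into row_data[i], and the partial maxima are then combined by _max_reduce.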
for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". 
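// Worked example: with threads_per_element == 4, the group owning one element of v
// holds its partial sums in temp_data[start_idx .. start_idx + 3].
//   round 1: half_point == 2, sub_idx 0 and 1 add in temp_data[. + 2] and temp_data[. + 3]
//   round 2: half_point == 1, sub_idx 0 adds in temp_data[. + 1]
// leaving the group's total at sub_idx == 0, i.e. in temp_data[start_idx].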
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0 / data[index]; } template<typename Real> __global__ static void _sqrt_elements(Real* data, Real epsilon, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = sqrt(data[index]+epsilon); } template<typename Real> __global__ static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) { int i = blockIdx.y * blockDim.y + threadIdx.y; //col int j = blockIdx.x * blockDim.x + threadIdx.x; //row if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real row_data[CU1DBLOCK]; //copy the input to row_data row_data[j] = mat[i+j*d.stride]; __syncthreads(); //get the sum Real sum = _sum_reduce(row_data); __syncthreads(); //add to previously accumulated sum if(threadIdx.x == 0) vec_sum[i] += sum; } template<typename Real> __global__ static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; //row int j = blockIdx.y * blockDim.y + threadIdx.y; //col if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real row_data[CU1DBLOCK]; //copy the input to row_data row_data[i] = mat[i+j*d.stride]; __syncthreads(); //get the sum Real sum = _sum_reduce(row_data); __syncthreads(); //add to previously accumulated sum if(threadIdx.x == 0) vec_sum[j] += sum; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j * dim.stride; int32_cuda srcA_index = i + j * srcA_stride; int32_cuda srcB_index = i + j * srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index]; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + 
threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... 
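// Taken together, the passes above and the loop below compute, for each row j,
//   y[j][c] = exp(x[j][c] - m_j) / sum_c' exp(x[j][c'] - m_j), where m_j = max_c' x[j][c'],
// i.e. a numerically stable softmax: subtracting the row maximum before exp()
// prevents overflow without changing the normalized result.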
for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? 
wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * CuMatrix */ void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { 
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } /* * CuVector */ void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void 
cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_sqrt_elements(dim3 Gr, dim3 Bl, float* data, float epsilon, MatrixDim d) { hipLaunchKernelGGL(( 
_sqrt_elements), dim3(Gr),dim3(Bl), 0, 0, data, epsilon, d); } void cudaD_sqrt_elements(dim3 Gr, dim3 Bl, double* data, double epsilon, MatrixDim d) { hipLaunchKernelGGL(( _sqrt_elements), dim3(Gr),dim3(Bl), 0, 0, data, epsilon, d); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) { hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d); } void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) { hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d); } void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) { hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d); } void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) { hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d); } /* * cu:: */ void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, 
d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } /* * lstm:: Added kernels for LSTM pointwise ops - if you are changing this be cognizant of GPU register usage since it will * affect speed. 
*/ #define APPLY_CUDA_MAX_GRADIENT(x,limit) fminf( fmaxf((x), (-(limit))), (limit) ) template<typename Real> __global__ static void _propagate_lstm_pointwiseops_nodrop(Real *yi, Real *yf, Real *yg, Real *yo, Real *yc, Real *yh, Real *ym, const Real *ycr, const Real *pi, const Real *pf, const Real *po, MatrixDim mat_dim, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* // input gate y_i.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); // forget gate y_f.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); // apply sigmoid/tanh functionis to squash the outputs y_i.Sigmoid(y_i); y_f.Sigmoid(y_f); */ Real r1 = ycr[index]; //3 Real r2 = yi[index]; //5 Real r3 = yf[index]; //5 Real r4; // input gate r2 = r1 * pi[j] + r2; // OLD: r2 = 1.0f / (1.0f + __expf(-r2)); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision r2 = __expf(-r2); if (isinf(r2)) { r2 = 0.0f; } else { r2 = 1.0f / (1.0f + r2); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision } yi[index] = r2; // forget gate r3 = r1 * pf[j] + r3; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision } yf[index] = r3; r3 = r3 * r1; // ycr * yf // input tanh y_g.Tanh(y_g); r4 = __expf(2.0f * yg[index]); if(isinf(r4)) { r1 = 1.0f; } else { r1 = (r4 - 1.0f) / (r4 + 1.0f); } //yg_t = tanhf(yg_t); // single precision yg[index] = r1; /* // memory cell y_c.AddMatDotMat(1.0, y_g, kNoTrans, y_i, kNoTrans, 0.0); y_c.AddMatDotMat(1.0, YC.RowRange((t-1)*S,S), kNoTrans, y_f, kNoTrans, 1.0); // the tanh-squashed version of c y_h.Tanh(y_c); */ r1 = r1 * r2 + r3; // r1 = yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) // clip cell memory r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r4 = __expf(2.0f * r1); if(isinf(r4)) { r2 = 1.0f; } else { r2 = (r4 - 1.0f) / (r4 + 1.0f); } //yh_t = tanh(yh_t); // single precision /* // output gate y_o.AddMatDiagVec(1.0, y_c, kNoTrans, phole_o_c_fw_, 1.0); y_o.Sigmoid(y_o); // the final output y_m.AddMatDotMat(1.0, y_h, kNoTrans, y_o, kNoTrans, 0.0); */ r3 = r1 * po[j] + yo[index]; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); // single precision } ym[index] = r2 * r3; //r2= yh from above yc[index] = r1; yh[index] = r2; yo[index] = r3; } } template<typename Real> __global__ static void _propagate_lstm_pointwiseops(Real *yi, Real *yf, Real *yg, Real *yo, Real *yc, Real *yh, Real *ym, const Real *ycr, const Real *pi, const Real *pf, const Real *po, const Real *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* // input gate y_i.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); // forget gate y_f.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); // apply sigmoid/tanh functionis to squash the outputs y_i.Sigmoid(y_i); y_f.Sigmoid(y_f); */ Real r1 = ycr[index]; //3 Real r2 = yi[index]; //5 Real r3 = 
yf[index]; //5 Real r4; // input gate r2 = r1 * pi[j] + r2; // OLD: r2 = 1.0f / (1.0f + __expf(-r2)); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision r2 = __expf(-r2); if (isinf(r2)) { r2 = 0.0f; } else { r2 = 1.0f / (1.0f + r2); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision } yi[index] = r2; // forget gate r3 = r1 * pf[j] + r3; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision } yf[index] = r3; r3 = r3 * r1; // input tanh y_g.Tanh(y_g); r4 = __expf(2.0f * yg[index]); if(isinf(r4)) { r1 = 1.0f; } else { r1 = (r4 - 1.0f) / (r4 + 1.0f); } //yg_t = tanhf(yg_t); // single precision yg[index] = r1; /* // memory cell y_c.AddMatDotMat(1.0, y_g, kNoTrans, y_i, kNoTrans, 0.0); if (no_mem_loss_dropout) y_c.AddMatDotMat(1.0, r_mask, kNoTrans, y_c, kNoTrans, 0.0); y_c.AddMatDotMat(1.0, YC.RowRange((t-1)*S,S), kNoTrans, y_f, kNoTrans, 1.0); if (rnndrop) y_c.AddMatDotMat(1.0, r_mask, kNoTrans, y_c, kNoTrans, 0.0); // the tanh-squashed version of c y_h.Tanh(y_c); */ if (nml) r1 = rm[i * mat2_row_stride + j] * r2 * r1 + r3; // r1 = yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) else // then rnndrop r1 = rm[i * mat2_row_stride + j] * (r2 * r1 + r3); // r1 = yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) // clip cell memory r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r4 = __expf(2.0f * r1); if(isinf(r4)) { r2 = 1.0f; } else { r2 = (r4 - 1.0f) / (r4 + 1.0f); } //yh_t = tanh(yh_t); // single precision /* // output gate y_o.AddMatDiagVec(1.0, y_c, kNoTrans, phole_o_c_fw_, 1.0); y_o.Sigmoid(y_o); // the final output y_m.AddMatDotMat(1.0, y_h, kNoTrans, y_o, kNoTrans, 0.0); */ r3 = r1 * po[j] + yo[index]; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); // single precision } ym[index] = r2 * r3; yc[index] = r1; yh[index] = r2; yo[index] = r3; } } template<typename Real> __global__ static void _backpropagate_lstm_pointwiseops_nodrop(const Real *yi, const Real *yf, const Real *yg, const Real *yo, const Real *yc, const Real *yh, const Real *ym, Real *di, Real *df, Real *dg, Real *d_o, Real *dc, Real *dh, Real *dm, Real *dcm, const Real *dir, const Real *dfr, const Real *dcr, const Real *dcmr, const Real *yfr, const Real *ycr, const Real *pi, const Real *pf, const Real *po, MatrixDim mat_dim, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* { // d_h d_h.AddMatDotMat(1.0, d_m, kNoTrans, y_o, kNoTrans, 0.0); d_h.DiffTanh(y_h, d_h); // d_o d_o.AddMatDotMat(1.0, d_m, kNoTrans, y_h, kNoTrans, 0.0); d_o.DiffSigmoid(y_o, d_o); } // d_c d_c.AddMat(1.0, d_h); d_c.AddMatDiagVec(1.0, DI.RowRange((t+1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, DF.RowRange((t+1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, d_o, kNoTrans, phole_o_c_fw_, 1.0); // d_f d_f.AddMatDotMat(1.0, d_c, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); d_f.DiffSigmoid(y_f, d_f); // d_i d_i.AddMatDotMat(1.0, d_c_m, kNoTrans, y_g, kNoTrans, 0.0); d_i.DiffSigmoid(y_i, d_i); // d_g d_g.AddMatDotMat(1.0, d_c_m, kNoTrans, y_i, kNoTrans, 0.0); d_g.DiffTanh(y_g, d_g); */ Real r1 = dm[index]; // 2 Real r2 = yo[index]; //3 Real r3 = yh[index]; 
//3 Real r4; // Clip dm if needed r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); dm[index] = r1; r4 = (1.0f - r3 * r3) * r1 * r2; // r4 = dh r2 = (1.0f - r2) * r2 * r1 * r3; // r2 = do r4 = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); dh[index] = r4; d_o[index] = r2; r1 = dc[index]; //r1 = dc r3 = yf[index]; // r3 = yf r1 = r4 + dir[index] * pi[j] + dfr[index] * pf[j] + r2 * po[j] + dcr[index] * yfr[index] + r1 ; //r1 = dc r2 = (1.0f - r3) * r3 * r1 * ycr[index]; // r2 = df r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); df[index] = r2; r2 = yi[index]; // r2 = yi r3 = yg[index]; // r3 = yg r4 = (1.0f - r2) * r2 * r1 * r3; di[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r2) * r2 * r1 * r3; r4 = (1.0f - r3 * r3) * r1 * r2; dg[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r3 * r3) * r1 * r2; dc[index] = r1; } } template<typename Real> __global__ static void _backpropagate_lstm_pointwiseops(const Real *yi, const Real *yf, const Real *yg, const Real *yo, const Real *yc, const Real *yh, const Real *ym, Real *di, Real *df, Real *dg, Real *d_o, Real *dc, Real *dh, Real *dm, Real *dcm, const Real *dir, const Real *dfr, const Real *dcr, const Real *dcmr, const Real *yfr, const Real *ycr, const Real *pi, const Real *pf, const Real *po, const Real *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* { // d_h d_h.AddMatDotMat(1.0, d_m, kNoTrans, y_o, kNoTrans, 0.0); d_h.DiffTanh(y_h, d_h); // d_o d_o.AddMatDotMat(1.0, d_m, kNoTrans, y_h, kNoTrans, 0.0); d_o.DiffSigmoid(y_o, d_o); } // d_c d_c.AddMat(1.0, d_h); d_c.AddMatDiagVec(1.0, DI.RowRange((t+1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, DF.RowRange((t+1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, d_o, kNoTrans, phole_o_c_fw_, 1.0); if (rnndrop) { d_c.AddMatDotMat(1.0, DCM.RowRange((t+1)*S,S), kNoTrans, YF.RowRange((t+1)*S,S), kNoTrans, 1.0); d_c_m.AddMatDotMat(1.0, d_c, kNoTrans, r_mask, kNoTrans, 0.0); } if (no_mem_loss_dropout) { d_c.AddMatDotMat(1.0, DC.RowRange((t+1)*S,S), kNoTrans, YF.RowRange((t+1)*S,S), kNoTrans, 1.0); d_c_m.AddMatDotMat(1.0, d_c, kNoTrans, r_mask, kNoTrans, 0.0); } // d_f if (rnndrop ) { d_f.AddMatDotMat(1.0, d_c_m, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); } else { d_f.AddMatDotMat(1.0, d_c, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); } d_f.DiffSigmoid(y_f, d_f); // d_i d_i.AddMatDotMat(1.0, d_c_m, kNoTrans, y_g, kNoTrans, 0.0); d_i.DiffSigmoid(y_i, d_i); // d_g d_g.AddMatDotMat(1.0, d_c_m, kNoTrans, y_i, kNoTrans, 0.0); d_g.DiffTanh(y_g, d_g); */ Real r1 = dm[index]; // 2 Real r2 = yo[index]; //3 Real r3 = yh[index]; //3 Real r4; // Clip dm if needed r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); // 2 dm[index] = r1; r4 = (1.0f - r3 * r3) * r1 * r2; // r4 = dh r2 = (1.0f - r2) * r2 * r1 * r3; // r2 = do r4 = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); r3 = dc[index]; //r3 = dc r3 = r4 + dir[index] * pi[j] + dfr[index] * pf[j] + r2 * po[j] + r3; //r3 = dc dh[index] = r4; d_o[index] = r2; if (nml) { r3 = dcr[index] * yfr[index] + r3; //r3 = dc r3 = APPLY_CUDA_MAX_GRADIENT(r3, max_grad); r1 = r3 * rm[i * mat2_row_stride + j]; //r1 = dcm r2 = r3 * ycr[index]; // r2 = df } else { r3 = 
dcmr[i * mat2_row_stride + j] * yfr[index] + r3; //r3 = dc r3 = APPLY_CUDA_MAX_GRADIENT(r3, max_grad); r1 = r3 * rm[i * mat2_row_stride + j]; //r1 = dcm r2 = r1 * ycr[index]; //r2 = df } dc[index] = r3; // we clip in the conditional above r3 = yf[index]; // r3 = yf r2 = (1.0f - r3) * r3 * r2; // r2 = df r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); df[index] = r2; r2 = yi[index]; // r2 = yi r3 = yg[index]; // r3 = yg r4 = (1.0f - r2) * r2 * r1 * r3; di[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r2) * r2 * r1 * r3; r4 = (1.0f - r3 * r3) * r1 * r2; dg[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r3 * r3) * r1 * r2; dcm[i * mat2_row_stride + j] = r1; // don't clip dcm since we clip dc in conditional } } void cudaF_propagate_lstm_pointwiseops(dim3 Gr, dim3 Bl, float *yi, float *yf, float *yg, float *yo, float *yc, float *yh, float *ym, const float *ycr, const float *pi, const float *pf, const float *po, const float *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, hipStream_t &stream ){ hipLaunchKernelGGL(( _propagate_lstm_pointwiseops), dim3(Gr),dim3(Bl),0,stream, yi, yf, yg, yo, yc, yh, ym, ycr, pi, pf, po, rm, mat_dim, mat2_row_stride, mat2_col_stride, nml); } void cudaF_propagate_lstm_pointwiseops_nodrop(dim3 Gr, dim3 Bl, float *yi, float *yf, float *yg, float *yo, float *yc, float *yh, float *ym, const float *ycr, const float *pi, const float *pf, const float *po, MatrixDim mat_dim, hipStream_t &stream ){ hipLaunchKernelGGL(( _propagate_lstm_pointwiseops_nodrop), dim3(Gr),dim3(Bl),0,stream, yi, yf, yg, yo, yc, yh, ym, ycr, pi, pf, po, mat_dim); } void cudaF_backpropagate_lstm_pointwiseops(dim3 Gr, dim3 Bl, const float *yi, const float *yf, const float *yg, const float *yo, const float *yc, const float *yh, const float *ym, float *di, float *df, float *dg, float *d_o, float *dc, float *dh, float *dm, float *dcm, const float *dir, const float *dfr, const float *dcr, const float *dcmr, const float *yfr, const float *ycr, const float *pi, const float *pf, const float *po, const float *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, hipStream_t &stream ) { hipLaunchKernelGGL(( _backpropagate_lstm_pointwiseops), dim3(Gr),dim3(Bl),0,stream, yi, yf, yg, yo, yc, yh, ym, di, df, dg, d_o, dc, dh, dm, dcm, dir, dfr, dcr, dcmr, yfr, ycr, pi, pf, po, rm, mat_dim, mat2_row_stride, mat2_col_stride, nml); } void cudaF_backpropagate_lstm_pointwiseops_nodrop(dim3 Gr, dim3 Bl, const float *yi, const float *yf, const float *yg, const float *yo, const float *yc, const float *yh, const float *ym, float *di, float *df, float *dg, float *d_o, float *dc, float *dh, float *dm, float *dcm, const float *dir, const float *dfr, const float *dcr, const float *dcmr, const float *yfr, const float *ycr, const float *pi, const float *pf, const float *po, MatrixDim mat_dim, hipStream_t &stream ) { hipLaunchKernelGGL(( _backpropagate_lstm_pointwiseops_nodrop), dim3(Gr),dim3(Bl),0,stream, yi, yf, yg, yo, yc, yh, ym, di, df, dg, d_o, dc, dh, dm, dcm, dir, dfr, dcr, dcmr, yfr, ycr, pi, pf, po, mat_dim ); } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. 
int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_dot_mat(Real *data, const Real *srcA_data, const Real *srcB_data, int trasA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { // 1 represents kTrans, 0 represents kNoTrans // but for now, only kNoTrans is availiable int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha*srcA_data[srcA_index]*srcB_data[srcB_index] + beta * data[tgt_index] ; } } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_dot_mat(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_dot_mat), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta); } void cudaD_add_mat_dot_mat(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_dot_mat), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta); } /* * All the following kernels are written by Yajie Miao for CTC training */ template<typename Real> __global__ static void _compute_ctc_alpha_one_sequence(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_alpha.cols; if (i < dim) { int32_cuda index_alpha = i + row * dim_alpha.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride; if (row == 0) { if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; } else { if (i > 1) { if (i % 2 == 0 || labels[i-2] == labels[i]) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]); 
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp)); } } else if (i == 1) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]); } } } } template<typename Real> __global__ static void _compute_ctc_alpha_multiple_sequence(Real* mat_alpha, int sequence_num, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1 int32_cuda dim = dim_alpha.cols; if (j >= dim || i >= sequence_num) return; int32_cuda index_alpha = j + (row * sequence_num + i) * dim_alpha.stride; int32_cuda index_label = j + i * dim_label_stride; int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha if (class_idx == -1 || row >= seq_lengths[i]) { mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; return; } int32_cuda index_label_m2 = (j-2) + i * dim_label_stride; int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride; int32_cuda index_alpha_rm1_i = j + ((row-1) * sequence_num + i) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (j-1) + ((row-1) * sequence_num + i) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (j-2) + ((row-1) * sequence_num + i) * dim_alpha.stride; if (row == 0) { if (j < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; } else { if (j > 1) { if (j % 2 == 0 || labels[index_label_m2] == labels[index_label]) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]); mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp)); } } else if (j == 1) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]); } } } template<typename Real> __global__ static void _compute_ctc_alpha_one_sequence_rescale(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_alpha.cols; if (i < dim) { int32_cuda index_alpha = i + row * dim_alpha.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride; if (row == 0) { if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = 0.0; } else { if (i > 1) { if (i % 2 == 0 || labels[i-2] == labels[i]) { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]); } else { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i] + 
mat_alpha[index_alpha_rm1_im2]); } } else if (i == 1) { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]); } else { mat_alpha[index_alpha] = mat_prob[index_prob] * mat_alpha[index_alpha_rm1_i]; } } } } template<typename Real> __global__ static void _compute_ctc_beta_one_sequence(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_beta.cols; if (i < dim) { int32_cuda index_beta = i + row * dim_beta.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride; int32_cuda row_num = dim_beta.rows; if (row == row_num - 1) { if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (i < dim - 2) { if (i % 2 == 0 || labels[i+2] == labels[i]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (i == dim - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } } } template<typename Real> __global__ static void _compute_ctc_beta_multiple_sequence(Real* mat_beta, int sequence_num, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const int32_cuda* label_lengths) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1 int32_cuda dim = dim_beta.cols; if (j >= dim || i >= sequence_num) return; int32_cuda index_beta = j + (row * sequence_num + i) * dim_beta.stride; int32_cuda index_label = j + i * dim_label_stride; int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha if (class_idx == -1 || row >= seq_lengths[i]) { mat_beta[index_beta] = NumericLimits<Real>::log_zero_; return; } int32_cuda index_label_p2 = (j+2) + i * dim_label_stride; int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride; int32_cuda index_beta_rp1_i = j + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (j+1) + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (j+2) + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda row_num = seq_lengths[i]; int32_cuda label_len = label_lengths[i]; /* if (row == row_num - 1) { if (j > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (j < dim - 2) { if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = 
LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (j == dim - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } */ if (row == row_num - 1) { if (j > label_len - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (j < label_len - 2) { if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (j == label_len - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } } template<typename Real> __global__ static void _compute_ctc_beta_one_sequence_rescale(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_beta.cols; if (i < dim) { int32_cuda index_beta = i + row * dim_beta.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride; int32_cuda row_num = dim_beta.rows; if (row == row_num - 1) { if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = 0; } else { if (i < dim - 2) { if (i % 2 == 0 || labels[i+2] == labels[i]) { mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]); } else { mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i] + mat_beta[index_beta_rp1_ip2]); } } else if (i == dim - 2) { mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]); } else { mat_beta[index_beta] = mat_prob[index_prob] * mat_beta[index_beta_rp1_i]; } } } } // mat_prob are in probability scale. template<typename Real> __global__ static void _compute_ctc_error_one_sequence(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, Real pzx) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index if (i < dim_error.rows && j < dim_error.cols) { Real err = NumericLimits<Real>::log_zero_; int32_cuda index_error = i * dim_error.stride + j; for(int s = 0; s < dim_alpha.cols; s++) { if (labels[s] == j) { // int32_cuda index_alpha = i * dim_alpha.stride + s; err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha])); } } Real val = ExpA(SubAB(err, AddAB(pzx, mat_prob[index_error] == 0? 
NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error])))); mat_error[index_error] = -1.0 * val; } } // mat_prob are in probability scale. template<typename Real> __global__ static void _compute_ctc_error_multiple_sequence(Real* mat_error, int32_cuda sequence_num, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const Real* pzx) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index if (i >= dim_error.rows || j >= dim_error.cols) return; int32_cuda seqX = i % sequence_num; int32_cuda rowX = i / sequence_num; if (rowX >= seq_lengths[seqX]) return; Real err = NumericLimits<Real>::log_zero_; int32_cuda index_error = i * dim_error.stride + j; for(int s = 0; s < dim_alpha.cols; s++) { int32_cuda index_label = s + seqX * dim_label_stride; if (labels[index_label] == -1) {continue;} if (labels[index_label] == j) { // int32_cuda index_alpha = i * dim_alpha.stride + s; err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha])); } } Real val = ExpA(SubAB(err, AddAB(pzx[seqX], mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error])))); mat_error[index_error] = -1.0 * val; } template<typename Real> __global__ static void _distribute_prob_by_label(Real* mat_prob_dist, MatrixDim dim_prob_dist, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index if (i < dim_prob_dist.rows && j < dim_prob_dist.cols) { int32_cuda index_prob_dist = i * dim_prob_dist.stride + j; int32_cuda index_prob = i * dim_prob.stride + labels[j]; mat_prob_dist[index_prob_dist] = mat_prob[index_prob]; } } // directly get the errors for the prior-softmax values template<typename Real> __global__ static void _compute_ctc_error_one_sequence_rescale(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, const Real* zt) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index if (i < dim_error.rows && j < dim_error.cols) { Real err = 0; int32_cuda index_error = i * dim_error.stride + j; for(int s = 0; s < dim_alpha.cols; s++) { if (labels[s] == j) { // int32_cuda index_alpha = i * dim_alpha.stride + s; err += mat_alpha[index_alpha] * mat_beta[index_alpha]; } } if (mat_prob[index_error] == 0 || zt[i] == 0) { mat_error[index_error] = 0; } else { mat_error[index_error] = mat_prob[index_error] - (err / zt[i]) / mat_prob[index_error]; } } } void cudaF_compute_ctc_alpha(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaF_compute_ctc_beta(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaF_compute_ctc_error(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim 
dim_alpha, const float *prob, const int *labels, float pzx) { hipLaunchKernelGGL(( _compute_ctc_error_one_sequence), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx); } void cudaF_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaF_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaF_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, const float *zt) { hipLaunchKernelGGL(( _compute_ctc_error_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, zt); } void cudaF_distribute_prob_by_label(dim3 Gr, dim3 Bl, float *prob_dist, MatrixDim dim_prob_dist, const float *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _distribute_prob_by_label), dim3(Gr), dim3(Bl), 0, 0, prob_dist, dim_prob_dist, prob, dim_prob, labels); } void cudaF_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, float *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) { hipLaunchKernelGGL(( _compute_ctc_alpha_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths); } void cudaF_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, float *beta, int seq_num, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) { hipLaunchKernelGGL(( _compute_ctc_beta_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths); } void cudaF_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, float *error, int seq_num, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const float *pzx) { hipLaunchKernelGGL(( _compute_ctc_error_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx); } void cudaD_compute_ctc_alpha(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaD_compute_ctc_beta(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaD_compute_ctc_error(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, double 
pzx) { hipLaunchKernelGGL(( _compute_ctc_error_one_sequence), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx); } void cudaD_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaD_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaD_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, const double *zt) { hipLaunchKernelGGL(( _compute_ctc_error_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, zt); } void cudaD_distribute_prob_by_label(dim3 Gr, dim3 Bl, double *prob_dist, MatrixDim dim_prob_dist, const double *prob, MatrixDim dim_prob, const int *labels) { hipLaunchKernelGGL(( _distribute_prob_by_label), dim3(Gr), dim3(Bl), 0, 0, prob_dist, dim_prob_dist, prob, dim_prob, labels); } void cudaD_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, double *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) { hipLaunchKernelGGL(( _compute_ctc_alpha_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths); } void cudaD_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, double *beta, int seq_num, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) { hipLaunchKernelGGL(( _compute_ctc_beta_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths); } void cudaD_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, double *error, int seq_num, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const double *pzx) { hipLaunchKernelGGL(( _compute_ctc_error_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx); }
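// ---------------------------------------------------------------------------
// Illustrative usage sketch (an addition for clarity, not part of the original
// kernels/wrappers above). It shows one plausible way a caller could drive the
// single-sequence CTC forward recursion: alpha holds one row per frame and each
// row depends on the previous one, so the kernel is launched once per time
// step. The helper name, the argument names (d_alpha, d_prob, d_labels,
// num_frames, num_labels = 2*|l|+1) and the CU1DBLOCK-based sizing are
// assumptions made for this example only.
static void example_ctc_forward_single_sequence(float *d_alpha, MatrixDim dim_alpha,
                                                const float *d_prob, MatrixDim dim_prob,
                                                const int *d_labels,
                                                int num_frames, int num_labels) {
  dim3 Bl(CU1DBLOCK);                                 // 1-D block over the label positions
  dim3 Gr((num_labels + CU1DBLOCK - 1) / CU1DBLOCK);  // enough blocks to cover 2*|l|+1 positions
  for (int t = 0; t < num_frames; t++)                // row t of alpha is computed from row t-1
    cudaF_compute_ctc_alpha(Gr, Bl, d_alpha, t, dim_alpha, d_prob, dim_prob, d_labels);
}
// ---------------------------------------------------------------------------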
/***********************************************************************
 * Original CUDA source: c1050c7ce687e61e8e2db52f5513e5835b3dfcf2.cu
 */
// gpucompute/cuda-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013 Johns Hopkins University (author: Guoguo Chen) // 2015 Yajie Miao // 2017 Jayadev Billa (added LSTM pointwise ops kernel) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cuda-kernels.h" #include "cuPrintf.cuh" #include "cuPrintf.cu" #include "ctc-utils.h" #include "stdio.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + 
j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]+FLT_EPSILON); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } /* * CuVector */ template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. 
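// Each thread first scans its own contiguous block of the vector for a local
// maximum; the CU1DBLOCK per-thread results are then combined by the
// tree-based _max_reduce below.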
for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". 
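// For example, with threads_per_element == 4, threads 0..3 of the first block
// all accumulate partial sums for v[0] (in temp_data[0..3]), threads 4..7 for
// v[1], and so on; each group of threads_per_element consecutive entries of
// temp_data is then reduced independently by the loop below.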
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0 / data[index]; } template<typename Real> __global__ static void _sqrt_elements(Real* data, Real epsilon, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = sqrt(data[index]+epsilon); } template<typename Real> __global__ static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) { int i = blockIdx.y * blockDim.y + threadIdx.y; //col int j = blockIdx.x * blockDim.x + threadIdx.x; //row if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real row_data[CU1DBLOCK]; //copy the input to row_data row_data[j] = mat[i+j*d.stride]; __syncthreads(); //get the sum Real sum = _sum_reduce(row_data); __syncthreads(); //add to previously accumulated sum if(threadIdx.x == 0) vec_sum[i] += sum; } template<typename Real> __global__ static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; //row int j = blockIdx.y * blockDim.y + threadIdx.y; //col if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real row_data[CU1DBLOCK]; //copy the input to row_data row_data[i] = mat[i+j*d.stride]; __syncthreads(); //get the sum Real sum = _sum_reduce(row_data); __syncthreads(); //add to previously accumulated sum if(threadIdx.x == 0) vec_sum[j] += sum; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j * dim.stride; int32_cuda srcA_index = i + j * srcA_stride; int32_cuda srcB_index = i + j * srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index]; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + 
threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... 
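// Finally, each thread rescales its strided share of the row (columns
// threadIdx.x, threadIdx.x + THREADS, ...) by the shared sum, so the row ends
// up summing to 1.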
for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? 
wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } /* * CuMatrix */ void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, 
const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } /* * CuVector */ void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, 
v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v, value, dim, inc); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v,value,dim,inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_sqrt_elements(dim3 Gr, dim3 Bl, float* data, float epsilon, MatrixDim d) { _sqrt_elements<<<Gr,Bl>>>(data, epsilon, d); } void cudaD_sqrt_elements(dim3 Gr, dim3 Bl, double* data, double epsilon, MatrixDim d) { _sqrt_elements<<<Gr,Bl>>>(data, epsilon, d); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) { _add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d); } void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) { _add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d); } void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) { _add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d); } void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) { _add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d); } /* * cu:: */ void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); 
} void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } /* * lstm:: Added kernels for LSTM pointwise ops - if you are changing this be cognizant of GPU register usage since it will * affect speed. */ #define APPLY_CUDA_MAX_GRADIENT(x,limit) fminf( fmaxf((x), (-(limit))), (limit) ) template<typename Real> __global__ static void _propagate_lstm_pointwiseops_nodrop(Real *yi, Real *yf, Real *yg, Real *yo, Real *yc, Real *yh, Real *ym, const Real *ycr, const Real *pi, const Real *pf, const Real *po, MatrixDim mat_dim, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* // input gate y_i.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); // forget gate y_f.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); // apply sigmoid/tanh functionis to squash the outputs y_i.Sigmoid(y_i); y_f.Sigmoid(y_f); */ Real r1 = ycr[index]; //3 Real r2 = yi[index]; //5 Real r3 = yf[index]; //5 Real r4; // input gate r2 = r1 * pi[j] + r2; // OLD: r2 = 1.0f / (1.0f + __expf(-r2)); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision r2 = __expf(-r2); if (isinf(r2)) { r2 = 0.0f; } else { r2 = 1.0f / (1.0f + r2); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision } yi[index] = r2; // forget gate r3 = r1 * pf[j] + r3; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision } yf[index] = r3; r3 = r3 * r1; // ycr * yf // input tanh y_g.Tanh(y_g); r4 = __expf(2.0f * yg[index]); if(isinf(r4)) { r1 = 1.0f; } else { r1 = (r4 - 1.0f) / (r4 + 1.0f); } //yg_t = tanhf(yg_t); // single precision yg[index] = r1; /* // memory cell y_c.AddMatDotMat(1.0, y_g, kNoTrans, y_i, kNoTrans, 0.0); y_c.AddMatDotMat(1.0, YC.RowRange((t-1)*S,S), kNoTrans, y_f, kNoTrans, 1.0); // the tanh-squashed version of c y_h.Tanh(y_c); */ r1 = r1 * r2 + r3; // r1 = 
yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) // clip cell memory r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r4 = __expf(2.0f * r1); if(isinf(r4)) { r2 = 1.0f; } else { r2 = (r4 - 1.0f) / (r4 + 1.0f); } //yh_t = tanh(yh_t); // single precision /* // output gate y_o.AddMatDiagVec(1.0, y_c, kNoTrans, phole_o_c_fw_, 1.0); y_o.Sigmoid(y_o); // the final output y_m.AddMatDotMat(1.0, y_h, kNoTrans, y_o, kNoTrans, 0.0); */ r3 = r1 * po[j] + yo[index]; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); // single precision } ym[index] = r2 * r3; //r2= yh from above yc[index] = r1; yh[index] = r2; yo[index] = r3; } } template<typename Real> __global__ static void _propagate_lstm_pointwiseops(Real *yi, Real *yf, Real *yg, Real *yo, Real *yc, Real *yh, Real *ym, const Real *ycr, const Real *pi, const Real *pf, const Real *po, const Real *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* // input gate y_i.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); // forget gate y_f.AddMatDiagVec(1.0, YC.RowRange((t-1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); // apply sigmoid/tanh functionis to squash the outputs y_i.Sigmoid(y_i); y_f.Sigmoid(y_f); */ Real r1 = ycr[index]; //3 Real r2 = yi[index]; //5 Real r3 = yf[index]; //5 Real r4; // input gate r2 = r1 * pi[j] + r2; // OLD: r2 = 1.0f / (1.0f + __expf(-r2)); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision r2 = __expf(-r2); if (isinf(r2)) { r2 = 0.0f; } else { r2 = 1.0f / (1.0f + r2); //1.0 / (1.0 + expf(-ycr_t * pi[j] - yi_t)); // single precision } yi[index] = r2; // forget gate r3 = r1 * pf[j] + r3; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + r3); //(1.0 + expf(-ycr_t * pf[j] - yf_t)); // single precision } yf[index] = r3; r3 = r3 * r1; // input tanh y_g.Tanh(y_g); r4 = __expf(2.0f * yg[index]); if(isinf(r4)) { r1 = 1.0f; } else { r1 = (r4 - 1.0f) / (r4 + 1.0f); } //yg_t = tanhf(yg_t); // single precision yg[index] = r1; /* // memory cell y_c.AddMatDotMat(1.0, y_g, kNoTrans, y_i, kNoTrans, 0.0); if (no_mem_loss_dropout) y_c.AddMatDotMat(1.0, r_mask, kNoTrans, y_c, kNoTrans, 0.0); y_c.AddMatDotMat(1.0, YC.RowRange((t-1)*S,S), kNoTrans, y_f, kNoTrans, 1.0); if (rnndrop) y_c.AddMatDotMat(1.0, r_mask, kNoTrans, y_c, kNoTrans, 0.0); // the tanh-squashed version of c y_h.Tanh(y_c); */ if (nml) r1 = rm[i * mat2_row_stride + j] * r2 * r1 + r3; // r1 = yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) else // then rnndrop r1 = rm[i * mat2_row_stride + j] * (r2 * r1 + r3); // r1 = yi, r2 = yg, r3 = yf * ycr, r1 = yc (after) // clip cell memory r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r4 = __expf(2.0f * r1); if(isinf(r4)) { r2 = 1.0f; } else { r2 = (r4 - 1.0f) / (r4 + 1.0f); } //yh_t = tanh(yh_t); // single precision /* // output gate y_o.AddMatDiagVec(1.0, y_c, kNoTrans, phole_o_c_fw_, 1.0); y_o.Sigmoid(y_o); // the final output y_m.AddMatDotMat(1.0, y_h, kNoTrans, y_o, kNoTrans, 0.0); */ r3 = r1 * po[j] + yo[index]; // OLD: r3 = 1.0f / (1.0f + __expf(-r3)); // single precision r3 = __expf(-r3); if (isinf(r3)) { r3 = 0.0f; } else { r3 = 1.0f / (1.0f + 
r3); // single precision } ym[index] = r2 * r3; yc[index] = r1; yh[index] = r2; yo[index] = r3; } } template<typename Real> __global__ static void _backpropagate_lstm_pointwiseops_nodrop(const Real *yi, const Real *yf, const Real *yg, const Real *yo, const Real *yc, const Real *yh, const Real *ym, Real *di, Real *df, Real *dg, Real *d_o, Real *dc, Real *dh, Real *dm, Real *dcm, const Real *dir, const Real *dfr, const Real *dcr, const Real *dcmr, const Real *yfr, const Real *ycr, const Real *pi, const Real *pf, const Real *po, MatrixDim mat_dim, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* { // d_h d_h.AddMatDotMat(1.0, d_m, kNoTrans, y_o, kNoTrans, 0.0); d_h.DiffTanh(y_h, d_h); // d_o d_o.AddMatDotMat(1.0, d_m, kNoTrans, y_h, kNoTrans, 0.0); d_o.DiffSigmoid(y_o, d_o); } // d_c d_c.AddMat(1.0, d_h); d_c.AddMatDiagVec(1.0, DI.RowRange((t+1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, DF.RowRange((t+1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, d_o, kNoTrans, phole_o_c_fw_, 1.0); // d_f d_f.AddMatDotMat(1.0, d_c, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); d_f.DiffSigmoid(y_f, d_f); // d_i d_i.AddMatDotMat(1.0, d_c_m, kNoTrans, y_g, kNoTrans, 0.0); d_i.DiffSigmoid(y_i, d_i); // d_g d_g.AddMatDotMat(1.0, d_c_m, kNoTrans, y_i, kNoTrans, 0.0); d_g.DiffTanh(y_g, d_g); */ Real r1 = dm[index]; // 2 Real r2 = yo[index]; //3 Real r3 = yh[index]; //3 Real r4; // Clip dm if needed r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); dm[index] = r1; r4 = (1.0f - r3 * r3) * r1 * r2; // r4 = dh r2 = (1.0f - r2) * r2 * r1 * r3; // r2 = do r4 = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); dh[index] = r4; d_o[index] = r2; r1 = dc[index]; //r1 = dc r3 = yf[index]; // r3 = yf r1 = r4 + dir[index] * pi[j] + dfr[index] * pf[j] + r2 * po[j] + dcr[index] * yfr[index] + r1 ; //r1 = dc r2 = (1.0f - r3) * r3 * r1 * ycr[index]; // r2 = df r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); df[index] = r2; r2 = yi[index]; // r2 = yi r3 = yg[index]; // r3 = yg r4 = (1.0f - r2) * r2 * r1 * r3; di[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r2) * r2 * r1 * r3; r4 = (1.0f - r3 * r3) * r1 * r2; dg[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r3 * r3) * r1 * r2; dc[index] = r1; } } template<typename Real> __global__ static void _backpropagate_lstm_pointwiseops(const Real *yi, const Real *yf, const Real *yg, const Real *yo, const Real *yc, const Real *yh, const Real *ym, Real *di, Real *df, Real *dg, Real *d_o, Real *dc, Real *dh, Real *dm, Real *dcm, const Real *dir, const Real *dfr, const Real *dcr, const Real *dcmr, const Real *yfr, const Real *ycr, const Real *pi, const Real *pf, const Real *po, const Real *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, const Real max_grad = 100 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j; if (i < mat_dim.rows && j < mat_dim.cols) { /* { // d_h d_h.AddMatDotMat(1.0, d_m, kNoTrans, y_o, kNoTrans, 0.0); d_h.DiffTanh(y_h, d_h); // d_o d_o.AddMatDotMat(1.0, d_m, kNoTrans, y_h, kNoTrans, 0.0); d_o.DiffSigmoid(y_o, d_o); } // d_c d_c.AddMat(1.0, d_h); d_c.AddMatDiagVec(1.0, DI.RowRange((t+1)*S,S), kNoTrans, phole_i_c_fw_, 1.0); 
d_c.AddMatDiagVec(1.0, DF.RowRange((t+1)*S,S), kNoTrans, phole_f_c_fw_, 1.0); d_c.AddMatDiagVec(1.0, d_o, kNoTrans, phole_o_c_fw_, 1.0); if (rnndrop) { d_c.AddMatDotMat(1.0, DCM.RowRange((t+1)*S,S), kNoTrans, YF.RowRange((t+1)*S,S), kNoTrans, 1.0); d_c_m.AddMatDotMat(1.0, d_c, kNoTrans, r_mask, kNoTrans, 0.0); } if (no_mem_loss_dropout) { d_c.AddMatDotMat(1.0, DC.RowRange((t+1)*S,S), kNoTrans, YF.RowRange((t+1)*S,S), kNoTrans, 1.0); d_c_m.AddMatDotMat(1.0, d_c, kNoTrans, r_mask, kNoTrans, 0.0); } // d_f if (rnndrop ) { d_f.AddMatDotMat(1.0, d_c_m, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); } else { d_f.AddMatDotMat(1.0, d_c, kNoTrans, YC.RowRange((t-1)*S,S), kNoTrans, 0.0); } d_f.DiffSigmoid(y_f, d_f); // d_i d_i.AddMatDotMat(1.0, d_c_m, kNoTrans, y_g, kNoTrans, 0.0); d_i.DiffSigmoid(y_i, d_i); // d_g d_g.AddMatDotMat(1.0, d_c_m, kNoTrans, y_i, kNoTrans, 0.0); d_g.DiffTanh(y_g, d_g); */ Real r1 = dm[index]; // 2 Real r2 = yo[index]; //3 Real r3 = yh[index]; //3 Real r4; // Clip dm if needed r1 = APPLY_CUDA_MAX_GRADIENT(r1, max_grad); // 2 dm[index] = r1; r4 = (1.0f - r3 * r3) * r1 * r2; // r4 = dh r2 = (1.0f - r2) * r2 * r1 * r3; // r2 = do r4 = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); r3 = dc[index]; //r3 = dc r3 = r4 + dir[index] * pi[j] + dfr[index] * pf[j] + r2 * po[j] + r3; //r3 = dc dh[index] = r4; d_o[index] = r2; if (nml) { r3 = dcr[index] * yfr[index] + r3; //r3 = dc r3 = APPLY_CUDA_MAX_GRADIENT(r3, max_grad); r1 = r3 * rm[i * mat2_row_stride + j]; //r1 = dcm r2 = r3 * ycr[index]; // r2 = df } else { r3 = dcmr[i * mat2_row_stride + j] * yfr[index] + r3; //r3 = dc r3 = APPLY_CUDA_MAX_GRADIENT(r3, max_grad); r1 = r3 * rm[i * mat2_row_stride + j]; //r1 = dcm r2 = r1 * ycr[index]; //r2 = df } dc[index] = r3; // we clip in the conditional above r3 = yf[index]; // r3 = yf r2 = (1.0f - r3) * r3 * r2; // r2 = df r2 = APPLY_CUDA_MAX_GRADIENT(r2, max_grad); df[index] = r2; r2 = yi[index]; // r2 = yi r3 = yg[index]; // r3 = yg r4 = (1.0f - r2) * r2 * r1 * r3; di[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r2) * r2 * r1 * r3; r4 = (1.0f - r3 * r3) * r1 * r2; dg[index] = APPLY_CUDA_MAX_GRADIENT(r4, max_grad); //(1.0f - r3 * r3) * r1 * r2; dcm[i * mat2_row_stride + j] = r1; // don't clip dcm since we clip dc in conditional } } void cudaF_propagate_lstm_pointwiseops(dim3 Gr, dim3 Bl, float *yi, float *yf, float *yg, float *yo, float *yc, float *yh, float *ym, const float *ycr, const float *pi, const float *pf, const float *po, const float *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, cudaStream_t &stream ){ _propagate_lstm_pointwiseops<<<Gr,Bl,0,stream>>>(yi, yf, yg, yo, yc, yh, ym, ycr, pi, pf, po, rm, mat_dim, mat2_row_stride, mat2_col_stride, nml); } void cudaF_propagate_lstm_pointwiseops_nodrop(dim3 Gr, dim3 Bl, float *yi, float *yf, float *yg, float *yo, float *yc, float *yh, float *ym, const float *ycr, const float *pi, const float *pf, const float *po, MatrixDim mat_dim, cudaStream_t &stream ){ _propagate_lstm_pointwiseops_nodrop<<<Gr,Bl,0,stream>>>(yi, yf, yg, yo, yc, yh, ym, ycr, pi, pf, po, mat_dim); } void cudaF_backpropagate_lstm_pointwiseops(dim3 Gr, dim3 Bl, const float *yi, const float *yf, const float *yg, const float *yo, const float *yc, const float *yh, const float *ym, float *di, float *df, float *dg, float *d_o, float *dc, float *dh, float *dm, float *dcm, const float *dir, const float *dfr, const float *dcr, const float *dcmr, const float *yfr, const float *ycr, const float 
*pi, const float *pf, const float *po, const float *rm, MatrixDim mat_dim, int mat2_row_stride, int mat2_col_stride, const bool nml, cudaStream_t &stream ) {
  _backpropagate_lstm_pointwiseops<<<Gr,Bl,0,stream>>>(yi, yf, yg, yo, yc, yh, ym, di, df, dg, d_o, dc, dh, dm, dcm, dir, dfr, dcr, dcmr, yfr, ycr, pi, pf, po, rm, mat_dim, mat2_row_stride, mat2_col_stride, nml);
}

void cudaF_backpropagate_lstm_pointwiseops_nodrop(dim3 Gr, dim3 Bl, const float *yi, const float *yf, const float *yg, const float *yo, const float *yc, const float *yh, const float *ym, float *di, float *df, float *dg, float *d_o, float *dc, float *dh, float *dm, float *dcm, const float *dir, const float *dfr, const float *dcr, const float *dcmr, const float *yfr, const float *ycr, const float *pi, const float *pf, const float *po, MatrixDim mat_dim, cudaStream_t &stream ) {
  _backpropagate_lstm_pointwiseops_nodrop<<<Gr,Bl,0,stream>>>(yi, yf, yg, yo, yc, yh, ym, di, df, dg, d_o, dc, dh, dm, dcm, dir, dfr, dcr, dcmr, yfr, ycr, pi, pf, po, mat_dim);
}

template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) {
  // Note from Dan: in this kernel, we make the x dimension correspond to the
  // row index and y to the column index. That was not always the case for
  // earlier kernels written by others.
  int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  int index = i * mat_dim.stride + j,
      index2 = i * mat2_row_stride + j * mat2_col_stride;
  if (i < mat_dim.rows && j < mat_dim.cols) {
    mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index];
  }
}

template<typename Real>
__global__
static void _add_mat_dot_mat(Real *data, const Real *srcA_data, const Real *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) {
  // 1 represents kTrans, 0 represents kNoTrans
  // but for now, only kNoTrans is available
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda tgt_index = i + j*dim.stride;
  int32_cuda srcA_index = i + j*srcA_stride;
  int32_cuda srcB_index = i + j*srcB_stride;
  if (i < dim.cols && j < dim.rows) {
    data[tgt_index] = alpha*srcA_data[srcA_index]*srcB_data[srcB_index] + beta * data[tgt_index];
  }
}

void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) {
  _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta);
}

void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) {
  _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta);
}

void cudaF_add_mat_dot_mat(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) {
  _add_mat_dot_mat<<<Gr, Bl>>>(data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta);
}

void cudaD_add_mat_dot_mat(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) {
  _add_mat_dot_mat<<<Gr,
Bl>>>(data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta); } /* * All the following kernels are written by Yajie Miao for CTC training */ template<typename Real> __global__ static void _compute_ctc_alpha_one_sequence(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_alpha.cols; if (i < dim) { int32_cuda index_alpha = i + row * dim_alpha.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride; if (row == 0) { if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; } else { if (i > 1) { if (i % 2 == 0 || labels[i-2] == labels[i]) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]); mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp)); } } else if (i == 1) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]); } } } } template<typename Real> __global__ static void _compute_ctc_alpha_multiple_sequence(Real* mat_alpha, int sequence_num, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1 int32_cuda dim = dim_alpha.cols; if (j >= dim || i >= sequence_num) return; int32_cuda index_alpha = j + (row * sequence_num + i) * dim_alpha.stride; int32_cuda index_label = j + i * dim_label_stride; int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha if (class_idx == -1 || row >= seq_lengths[i]) { mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; return; } int32_cuda index_label_m2 = (j-2) + i * dim_label_stride; int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride; int32_cuda index_alpha_rm1_i = j + ((row-1) * sequence_num + i) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (j-1) + ((row-1) * sequence_num + i) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (j-2) + ((row-1) * sequence_num + i) * dim_alpha.stride; if (row == 0) { if (j < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_; } else { if (j > 1) { if (j % 2 == 0 || labels[index_label_m2] == labels[index_label]) { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]); mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp)); } } else if (j == 1) { mat_alpha[index_alpha] = 
AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i])); } else { mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]); } } } template<typename Real> __global__ static void _compute_ctc_alpha_one_sequence_rescale(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_alpha.cols; if (i < dim) { int32_cuda index_alpha = i + row * dim_alpha.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride; int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride; if (row == 0) { if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob]; else mat_alpha[index_alpha] = 0.0; } else { if (i > 1) { if (i % 2 == 0 || labels[i-2] == labels[i]) { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]); } else { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i] + mat_alpha[index_alpha_rm1_im2]); } } else if (i == 1) { mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]); } else { mat_alpha[index_alpha] = mat_prob[index_prob] * mat_alpha[index_alpha_rm1_i]; } } } } template<typename Real> __global__ static void _compute_ctc_beta_one_sequence(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_beta.cols; if (i < dim) { int32_cuda index_beta = i + row * dim_beta.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride; int32_cuda row_num = dim_beta.rows; if (row == row_num - 1) { if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (i < dim - 2) { if (i % 2 == 0 || labels[i+2] == labels[i]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (i == dim - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } } } template<typename Real> __global__ static void _compute_ctc_beta_multiple_sequence(Real* mat_beta, int sequence_num, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const int32_cuda* label_lengths) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1 int32_cuda dim = dim_beta.cols; if (j >= dim || i >= 
sequence_num) return; int32_cuda index_beta = j + (row * sequence_num + i) * dim_beta.stride; int32_cuda index_label = j + i * dim_label_stride; int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha if (class_idx == -1 || row >= seq_lengths[i]) { mat_beta[index_beta] = NumericLimits<Real>::log_zero_; return; } int32_cuda index_label_p2 = (j+2) + i * dim_label_stride; int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride; int32_cuda index_beta_rp1_i = j + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (j+1) + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (j+2) + ((row+1) * sequence_num + i) * dim_beta.stride; int32_cuda row_num = seq_lengths[i]; int32_cuda label_len = label_lengths[i]; /* if (row == row_num - 1) { if (j > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (j < dim - 2) { if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (j == dim - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } */ if (row == row_num - 1) { if (j > label_len - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = NumericLimits<Real>::log_zero_; } else { if (j < label_len - 2) { if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]); mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp)); } } else if (j == label_len - 2) { mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i])); } else { mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]); } } } template<typename Real> __global__ static void _compute_ctc_beta_one_sequence_rescale(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda dim = dim_beta.cols; if (i < dim) { int32_cuda index_beta = i + row * dim_beta.stride; int32_cuda class_idx = labels[i]; int32_cuda index_prob = class_idx + row * dim_prob.stride; int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride; int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride; int32_cuda row_num = dim_beta.rows; if (row == row_num - 1) { if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob]; else mat_beta[index_beta] = 0; } else { if (i < dim - 2) { if (i % 2 == 0 || labels[i+2] == labels[i]) { mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]); } else { mat_beta[index_beta] = mat_prob[index_prob] * 
          (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i] + mat_beta[index_beta_rp1_ip2]);
        }
      } else if (i == dim - 2) {
        mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]);
      } else {
        mat_beta[index_beta] = mat_prob[index_prob] * mat_beta[index_beta_rp1_i];
      }
    }
  }
}

// mat_prob are in probability scale.
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, Real pzx) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i < dim_error.rows && j < dim_error.cols) {
    Real err = NumericLimits<Real>::log_zero_;
    int32_cuda index_error = i * dim_error.stride + j;
    for(int s = 0; s < dim_alpha.cols; s++) {
      if (labels[s] == j) {
        int32_cuda index_alpha = i * dim_alpha.stride + s;
        err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
      }
    }
    Real val = ExpA(SubAB(err, AddAB(pzx, mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error]))));
    mat_error[index_error] = -1.0 * val;
  }
}

// mat_prob are in probability scale.
template<typename Real>
__global__
static void _compute_ctc_error_multiple_sequence(Real* mat_error, int32_cuda sequence_num, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const Real* pzx) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i >= dim_error.rows || j >= dim_error.cols) return;
  int32_cuda seqX = i % sequence_num;
  int32_cuda rowX = i / sequence_num;
  if (rowX >= seq_lengths[seqX]) return;
  Real err = NumericLimits<Real>::log_zero_;
  int32_cuda index_error = i * dim_error.stride + j;
  for(int s = 0; s < dim_alpha.cols; s++) {
    int32_cuda index_label = s + seqX * dim_label_stride;
    if (labels[index_label] == -1) {continue;}
    if (labels[index_label] == j) {
      int32_cuda index_alpha = i * dim_alpha.stride + s;
      err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
    }
  }
  Real val = ExpA(SubAB(err, AddAB(pzx[seqX], mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error]))));
  mat_error[index_error] = -1.0 * val;
}

template<typename Real>
__global__
static void _distribute_prob_by_label(Real* mat_prob_dist, MatrixDim dim_prob_dist, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i < dim_prob_dist.rows && j < dim_prob_dist.cols) {
    int32_cuda index_prob_dist = i * dim_prob_dist.stride + j;
    int32_cuda index_prob = i * dim_prob.stride + labels[j];
    mat_prob_dist[index_prob_dist] = mat_prob[index_prob];
  }
}

// directly get the errors for the prior-softmax values
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence_rescale(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, const Real* zt) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i < dim_error.rows && j < dim_error.cols) {
    Real err = 0;
    int32_cuda index_error = i * dim_error.stride + j;
    for(int s = 0; s < dim_alpha.cols; s++) {
      if (labels[s] == j) {
        int32_cuda index_alpha = i * dim_alpha.stride + s;
        err += mat_alpha[index_alpha] * mat_beta[index_alpha];
      }
    }
    if (mat_prob[index_error] == 0 || zt[i] == 0) {
      mat_error[index_error] = 0;
    } else {
      mat_error[index_error] = mat_prob[index_error] - (err / zt[i]) / mat_prob[index_error];
    }
  }
}

void cudaF_compute_ctc_alpha(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
  _compute_ctc_alpha_one_sequence<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}

void cudaF_compute_ctc_beta(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
  _compute_ctc_beta_one_sequence<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}

void cudaF_compute_ctc_error(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, float pzx) {
  _compute_ctc_error_one_sequence<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx);
}

void cudaF_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
  _compute_ctc_alpha_one_sequence_rescale<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}

void cudaF_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
  _compute_ctc_beta_one_sequence_rescale<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}

void cudaF_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, const float *zt) {
  _compute_ctc_error_one_sequence_rescale<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, zt);
}

void cudaF_distribute_prob_by_label(dim3 Gr, dim3 Bl, float *prob_dist, MatrixDim dim_prob_dist, const float *prob, MatrixDim dim_prob, const int *labels) {
  _distribute_prob_by_label<<<Gr, Bl>>>(prob_dist, dim_prob_dist, prob, dim_prob, labels);
}

void cudaF_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, float
*alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) { _compute_ctc_alpha_multiple_sequence<<<Gr, Bl>>>(alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths); } void cudaF_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, float *beta, int seq_num, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) { _compute_ctc_beta_multiple_sequence<<<Gr, Bl>>>(beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths); } void cudaF_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, float *error, int seq_num, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const float *pzx) { _compute_ctc_error_multiple_sequence<<<Gr, Bl>>>(error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx); } void cudaD_compute_ctc_alpha(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) { _compute_ctc_alpha_one_sequence<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaD_compute_ctc_beta(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) { _compute_ctc_beta_one_sequence<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaD_compute_ctc_error(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, double pzx) { _compute_ctc_error_one_sequence<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx); } void cudaD_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) { _compute_ctc_alpha_one_sequence_rescale<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels); } void cudaD_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) { _compute_ctc_beta_one_sequence_rescale<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels); } void cudaD_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, const double *zt) { _compute_ctc_error_one_sequence_rescale<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, zt); } void cudaD_distribute_prob_by_label(dim3 Gr, dim3 Bl, double *prob_dist, MatrixDim dim_prob_dist, const double *prob, MatrixDim dim_prob, const int *labels) { _distribute_prob_by_label<<<Gr, Bl>>>(prob_dist, dim_prob_dist, prob, dim_prob, labels); } void cudaD_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, double *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) { _compute_ctc_alpha_multiple_sequence<<<Gr, Bl>>>(alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths); } void cudaD_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, double *beta, int seq_num, int row_idx, 
MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) { _compute_ctc_beta_multiple_sequence<<<Gr, Bl>>>(beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths); } void cudaD_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, double *error, int seq_num, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const double *pzx) { _compute_ctc_error_multiple_sequence<<<Gr, Bl>>>(error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx); }
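/*
 * Illustrative host-side sketch (not part of the original file): one way the
 * multi-sequence CTC alpha wrapper above could be driven, one time frame at a
 * time, since each alpha row depends on the previous one. The helper name
 * example_launch_ctc_alpha_forward, the 16x16 block shape, and the grid
 * rounding below are assumptions for illustration only; the actual callers
 * live in the CTC training code that includes cuda-kernels.h.
 */
static void example_launch_ctc_alpha_forward(float *alpha, int seq_num, int num_frames,
                                             MatrixDim dim_alpha, const float *prob,
                                             MatrixDim dim_prob, const int *labels,
                                             int dim_label_stride, const int *seq_lengths) {
  // In the kernel, x indexes the sequence and y indexes the label position (up to 2*|l|+1).
  dim3 Bl(16, 16);
  dim3 Gr((seq_num + Bl.x - 1) / Bl.x, (dim_alpha.cols + Bl.y - 1) / Bl.y);
  // The forward recursion reads row-1, so the kernel is launched once per frame, in order.
  for (int row = 0; row < num_frames; row++) {
    cudaF_compute_ctc_alpha_multiple_sequence(Gr, Bl, alpha, seq_num, row, dim_alpha,
                                              prob, dim_prob, labels, dim_label_stride,
                                              seq_lengths);
  }
}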
ccb935903ece3f4df7a2265a42824a12af2461a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _CUGAUSSIANFILTERXY_CU_ #define _CUGAUSSIANFILTERXY_CU_ #include "hipacc_types.hpp" #include "hipacc_math_functions.hpp" texture<uchar, hipTextureType1D, hipReadModeElementType> _texinputXY; const textureReference *_texinputXYRef; extern "C" { __global__ __launch_bounds__ (32*1) void cuGaussianFilterXYKernel(uchar * __restrict__ iter, int iter_width, int iter_height, int iter_stride, int input_width, int input_height, int input_stride, int bh_start_left, int bh_start_right, int bh_start_top, int bh_start_bottom, int bh_fall_back) { const int gid_x = blockDim.x * blockIdx.x + threadIdx.x; const int gid_y = blockDim.y * blockIdx.y * 8 + threadIdx.y; uchar _smeminput[12][97] __attribute__((shared)); if (bh_fall_back) goto BH_FB; if (blockIdx.x < bh_start_left && blockIdx.y < bh_start_top) goto BH_TL; if (blockIdx.x >= bh_start_right && blockIdx.y < bh_start_top) goto BH_TR; if (blockIdx.y < bh_start_top) goto BH_T; if (blockIdx.y >= bh_start_bottom && blockIdx.x < bh_start_left) goto BH_BL; if (blockIdx.y >= bh_start_bottom && blockIdx.x >= bh_start_right) goto BH_BR; if (blockIdx.y >= bh_start_bottom) goto BH_B; if (blockIdx.x >= bh_start_right) goto BH_R; if (blockIdx.x < bh_start_left) goto BH_L; goto BH_NO; BH_FB: { int _gid_x0 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y0 = gid_y + (-2); if (_gid_x0 >= input_width) _gid_x0 = input_width - 1; if (_gid_y0 >= input_height) _gid_y0 = input_height - 1; if (_gid_x0 < 0) _gid_x0 = 0; if (_gid_y0 < 0) _gid_y0 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y0) * input_stride + _gid_x0); int _gid_x1 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y1 = gid_y + (-2); if (_gid_x1 >= input_width) _gid_x1 = input_width - 1; if (_gid_y1 >= input_height) _gid_y1 = input_height - 1; if (_gid_x1 < 0) _gid_x1 = 0; if (_gid_y1 < 0) _gid_y1 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y1) * input_stride + _gid_x1); int _gid_x2 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y2 = gid_y + (-2); if (_gid_x2 >= input_width) _gid_x2 = input_width - 1; if (_gid_y2 >= input_height) _gid_y2 = input_height - 1; if (_gid_x2 < 0) _gid_x2 = 0; if (_gid_y2 < 0) _gid_y2 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y2) * input_stride + _gid_x2); int _gid_x3 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y3 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x3 >= input_width) _gid_x3 = input_width - 1; if (_gid_y3 >= input_height) _gid_y3 = input_height - 1; if (_gid_x3 < 0) _gid_x3 = 0; if (_gid_y3 < 0) _gid_y3 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y3) * input_stride + _gid_x3); int _gid_x4 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y4 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x4 >= input_width) _gid_x4 = input_width - 1; if (_gid_y4 >= input_height) _gid_y4 = input_height - 1; if (_gid_x4 < 0) _gid_x4 = 0; if (_gid_y4 < 0) _gid_y4 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y4) * input_stride + _gid_x4); int _gid_x5 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y5 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x5 >= input_width) _gid_x5 = input_width - 1; if (_gid_y5 >= input_height) _gid_y5 = input_height - 1; if (_gid_x5 < 0) 
_gid_x5 = 0; if (_gid_y5 < 0) _gid_y5 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y5) * input_stride + _gid_x5); int _gid_x6 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y6 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x6 >= input_width) _gid_x6 = input_width - 1; if (_gid_y6 >= input_height) _gid_y6 = input_height - 1; if (_gid_x6 < 0) _gid_x6 = 0; if (_gid_y6 < 0) _gid_y6 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y6) * input_stride + _gid_x6); int _gid_x7 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y7 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x7 >= input_width) _gid_x7 = input_width - 1; if (_gid_y7 >= input_height) _gid_y7 = input_height - 1; if (_gid_x7 < 0) _gid_x7 = 0; if (_gid_y7 < 0) _gid_y7 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y7) * input_stride + _gid_x7); int _gid_x8 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y8 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x8 >= input_width) _gid_x8 = input_width - 1; if (_gid_y8 >= input_height) _gid_y8 = input_height - 1; if (_gid_x8 < 0) _gid_x8 = 0; if (_gid_y8 < 0) _gid_y8 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y8) * input_stride + _gid_x8); int _gid_x9 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y9 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x9 >= input_width) _gid_x9 = input_width - 1; if (_gid_y9 >= input_height) _gid_y9 = input_height - 1; if (_gid_x9 < 0) _gid_x9 = 0; if (_gid_y9 < 0) _gid_y9 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y9) * input_stride + _gid_x9); int _gid_x10 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y10 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x10 >= input_width) _gid_x10 = input_width - 1; if (_gid_y10 >= input_height) _gid_y10 = input_height - 1; if (_gid_x10 < 0) _gid_x10 = 0; if (_gid_y10 < 0) _gid_y10 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y10) * input_stride + _gid_x10); int _gid_x11 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y11 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x11 >= input_width) _gid_x11 = input_width - 1; if (_gid_y11 >= input_height) _gid_y11 = input_height - 1; if (_gid_x11 < 0) _gid_x11 = 0; if (_gid_y11 < 0) _gid_y11 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y11) * input_stride + _gid_x11); int _gid_x12 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y12 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x12 >= input_width) _gid_x12 = input_width - 1; if (_gid_y12 >= input_height) _gid_y12 = input_height - 1; if (_gid_x12 < 0) _gid_x12 = 0; if (_gid_y12 < 0) _gid_y12 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y12) * input_stride + _gid_x12); int _gid_x13 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y13 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x13 >= input_width) _gid_x13 = input_width - 1; if (_gid_y13 >= input_height) _gid_y13 = input_height - 1; if (_gid_x13 < 0) _gid_x13 = 0; if (_gid_y13 < 0) _gid_y13 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * 
(int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y13) * input_stride + _gid_x13); int _gid_x14 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y14 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x14 >= input_width) _gid_x14 = input_width - 1; if (_gid_y14 >= input_height) _gid_y14 = input_height - 1; if (_gid_x14 < 0) _gid_x14 = 0; if (_gid_y14 < 0) _gid_y14 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y14) * input_stride + _gid_x14); int _gid_x15 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y15 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x15 >= input_width) _gid_x15 = input_width - 1; if (_gid_y15 >= input_height) _gid_y15 = input_height - 1; if (_gid_x15 < 0) _gid_x15 = 0; if (_gid_y15 < 0) _gid_y15 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y15) * input_stride + _gid_x15); int _gid_x16 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y16 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x16 >= input_width) _gid_x16 = input_width - 1; if (_gid_y16 >= input_height) _gid_y16 = input_height - 1; if (_gid_x16 < 0) _gid_x16 = 0; if (_gid_y16 < 0) _gid_y16 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y16) * input_stride + _gid_x16); int _gid_x17 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y17 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x17 >= input_width) _gid_x17 = input_width - 1; if (_gid_y17 >= input_height) _gid_y17 = input_height - 1; if (_gid_x17 < 0) _gid_x17 = 0; if (_gid_y17 < 0) _gid_y17 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y17) * input_stride + _gid_x17); int _gid_x18 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y18 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x18 >= input_width) _gid_x18 = input_width - 1; if (_gid_y18 >= input_height) _gid_y18 = input_height - 1; if (_gid_x18 < 0) _gid_x18 = 0; if (_gid_y18 < 0) _gid_y18 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y18) * input_stride + _gid_x18); int _gid_x19 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y19 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x19 >= input_width) _gid_x19 = input_width - 1; if (_gid_y19 >= input_height) _gid_y19 = input_height - 1; if (_gid_x19 < 0) _gid_x19 = 0; if (_gid_y19 < 0) _gid_y19 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y19) * input_stride + _gid_x19); int _gid_x20 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y20 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x20 >= input_width) _gid_x20 = input_width - 1; if (_gid_y20 >= input_height) _gid_y20 = input_height - 1; if (_gid_x20 < 0) _gid_x20 = 0; if (_gid_y20 < 0) _gid_y20 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y20) * input_stride + _gid_x20); int _gid_x21 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y21 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x21 >= input_width) _gid_x21 = input_width - 1; if (_gid_y21 >= input_height) _gid_y21 = input_height - 1; if (_gid_x21 < 0) _gid_x21 = 0; if (_gid_y21 < 0) _gid_y21 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y21) * 
input_stride + _gid_x21); int _gid_x22 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y22 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x22 >= input_width) _gid_x22 = input_width - 1; if (_gid_y22 >= input_height) _gid_y22 = input_height - 1; if (_gid_x22 < 0) _gid_x22 = 0; if (_gid_y22 < 0) _gid_y22 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y22) * input_stride + _gid_x22); int _gid_x23 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y23 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x23 >= input_width) _gid_x23 = input_width - 1; if (_gid_y23 >= input_height) _gid_y23 = input_height - 1; if (_gid_x23 < 0) _gid_x23 = 0; if (_gid_y23 < 0) _gid_y23 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y23) * input_stride + _gid_x23); int _gid_x24 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y24 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x24 >= input_width) _gid_x24 = input_width - 1; if (_gid_y24 >= input_height) _gid_y24 = input_height - 1; if (_gid_x24 < 0) _gid_x24 = 0; if (_gid_y24 < 0) _gid_y24 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y24) * input_stride + _gid_x24); int _gid_x25 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y25 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x25 >= input_width) _gid_x25 = input_width - 1; if (_gid_y25 >= input_height) _gid_y25 = input_height - 1; if (_gid_x25 < 0) _gid_x25 = 0; if (_gid_y25 < 0) _gid_y25 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y25) * input_stride + _gid_x25); int _gid_x26 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y26 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x26 >= input_width) _gid_x26 = input_width - 1; if (_gid_y26 >= input_height) _gid_y26 = input_height - 1; if (_gid_x26 < 0) _gid_x26 = 0; if (_gid_y26 < 0) _gid_y26 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y26) * input_stride + _gid_x26); int _gid_x27 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y27 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x27 >= input_width) _gid_x27 = input_width - 1; if (_gid_y27 >= input_height) _gid_y27 = input_height - 1; if (_gid_x27 < 0) _gid_x27 = 0; if (_gid_y27 < 0) _gid_y27 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y27) * input_stride + _gid_x27); int _gid_x28 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y28 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x28 >= input_width) _gid_x28 = input_width - 1; if (_gid_y28 >= input_height) _gid_y28 = input_height - 1; if (_gid_x28 < 0) _gid_x28 = 0; if (_gid_y28 < 0) _gid_y28 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y28) * input_stride + _gid_x28); int _gid_x29 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y29 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x29 >= input_width) _gid_x29 = input_width - 1; if (_gid_y29 >= input_height) _gid_y29 = input_height - 1; if (_gid_x29 < 0) _gid_x29 = 0; if (_gid_y29 < 0) _gid_y29 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y29) * input_stride + _gid_x29); int _gid_x30 = gid_x + 0 * 
(int)blockDim.x - 32; int _gid_y30 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x30 >= input_width) _gid_x30 = input_width - 1; if (_gid_y30 >= input_height) _gid_y30 = input_height - 1; if (_gid_x30 < 0) _gid_x30 = 0; if (_gid_y30 < 0) _gid_y30 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y30) * input_stride + _gid_x30); int _gid_x31 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y31 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x31 >= input_width) _gid_x31 = input_width - 1; if (_gid_y31 >= input_height) _gid_y31 = input_height - 1; if (_gid_x31 < 0) _gid_x31 = 0; if (_gid_y31 < 0) _gid_y31 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y31) * input_stride + _gid_x31); int _gid_x32 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y32 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x32 >= input_width) _gid_x32 = input_width - 1; if (_gid_y32 >= input_height) _gid_y32 = input_height - 1; if (_gid_x32 < 0) _gid_x32 = 0; if (_gid_y32 < 0) _gid_y32 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y32) * input_stride + _gid_x32); int _gid_x33 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y33 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x33 >= input_width) _gid_x33 = input_width - 1; if (_gid_y33 >= input_height) _gid_y33 = input_height - 1; if (_gid_x33 < 0) _gid_x33 = 0; if (_gid_y33 < 0) _gid_y33 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y33) * input_stride + _gid_x33); int _gid_x34 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y34 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x34 >= input_width) _gid_x34 = input_width - 1; if (_gid_y34 >= input_height) _gid_y34 = input_height - 1; if (_gid_x34 < 0) _gid_x34 = 0; if (_gid_y34 < 0) _gid_y34 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y34) * input_stride + _gid_x34); int _gid_x35 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y35 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x35 >= input_width) _gid_x35 = input_width - 1; if (_gid_y35 >= input_height) _gid_y35 = input_height - 1; if (_gid_x35 < 0) _gid_x35 = 0; if (_gid_y35 < 0) _gid_y35 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y35) * input_stride + _gid_x35); __syncthreads(); if (gid_x < iter_width) { if (gid_y < iter_height) { float _tmp36 = 0.F; { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 
32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp36 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp37 = 0.F; { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 
2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp37 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp38 = 0.F; { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp38 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp39 = 0.F; { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp39 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp40 = 0.F; { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp40 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 5 * (int)blockDim.y < 
iter_height) { float _tmp41 = 0.F; { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp41 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp42 = 0.F; { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp42 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp43 = 0.F; { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 
2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp43 + 0.5F); } } } goto BH_EXIT; BH_TL: { int _gid_x44 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y44 = gid_y + (-2); if (_gid_x44 < 0) _gid_x44 = 0; if (_gid_y44 < 0) _gid_y44 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y44) * input_stride + _gid_x44); int _gid_x45 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y45 = gid_y + (-2); if (_gid_x45 < 0) _gid_x45 = 0; if (_gid_y45 < 0) _gid_y45 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y45) * input_stride + _gid_x45); int _gid_x46 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y46 = gid_y + (-2); if (_gid_x46 < 0) _gid_x46 = 0; if (_gid_y46 < 0) _gid_y46 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y46) * input_stride + _gid_x46); int _gid_x47 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y47 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x47 < 0) _gid_x47 = 0; if (_gid_y47 < 0) _gid_y47 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y47) * input_stride + _gid_x47); int _gid_x48 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y48 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x48 < 0) _gid_x48 = 0; if (_gid_y48 < 
0) _gid_y48 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y48) * input_stride + _gid_x48); int _gid_x49 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y49 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x49 < 0) _gid_x49 = 0; if (_gid_y49 < 0) _gid_y49 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y49) * input_stride + _gid_x49); int _gid_x50 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y50 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x50 < 0) _gid_x50 = 0; if (_gid_y50 < 0) _gid_y50 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y50) * input_stride + _gid_x50); int _gid_x51 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y51 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x51 < 0) _gid_x51 = 0; if (_gid_y51 < 0) _gid_y51 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y51) * input_stride + _gid_x51); int _gid_x52 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y52 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x52 < 0) _gid_x52 = 0; if (_gid_y52 < 0) _gid_y52 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y52) * input_stride + _gid_x52); int _gid_x53 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y53 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x53 < 0) _gid_x53 = 0; if (_gid_y53 < 0) _gid_y53 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y53) * input_stride + _gid_x53); int _gid_x54 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y54 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x54 < 0) _gid_x54 = 0; if (_gid_y54 < 0) _gid_y54 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y54) * input_stride + _gid_x54); int _gid_x55 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y55 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x55 < 0) _gid_x55 = 0; if (_gid_y55 < 0) _gid_y55 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y55) * input_stride + _gid_x55); int _gid_x56 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y56 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x56 < 0) _gid_x56 = 0; if (_gid_y56 < 0) _gid_y56 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y56) * input_stride + _gid_x56); int _gid_x57 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y57 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x57 < 0) _gid_x57 = 0; if (_gid_y57 < 0) _gid_y57 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y57) * input_stride + _gid_x57); int _gid_x58 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y58 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x58 < 0) _gid_x58 = 0; if (_gid_y58 < 0) _gid_y58 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y58) * input_stride + _gid_x58); int _gid_x59 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y59 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x59 < 0) _gid_x59 = 0; if (_gid_y59 < 0) _gid_y59 = 0; 
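/*
 * Note on the generated code above and below (added for readability; the
 * identifiers in the sketch are illustrative only and are not names defined
 * in this file):
 *
 * Each index pair (_gid_xNN, _gid_yNN) is clamped to the valid image range
 * before the tex1Dfetch, i.e. clamp-to-edge boundary handling while staging
 * the input tile into _smeminput. In this BH_TL branch both coordinates are
 * clamped at 0 (top-left corner); the BH_TR branch below clamps x to
 * input_width - 1 and y to 0; the branch above clamps against all four image
 * borders. Once the tile is staged and __syncthreads() has been passed, every
 * unrolled _tmpNN block evaluates the same normalized 5x5 Gaussian, roughly:
 *
 *   const float mask[5][5] = {
 *     {0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f},
 *     {0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f},
 *     {0.026151f, 0.090339f, 0.136565f, 0.090339f, 0.026151f},
 *     {0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f},
 *     {0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f}};
 *   float sum = 0.0f;
 *   for (int dy = -2; dy <= 2; ++dy)
 *     for (int dx = -2; dx <= 2; ++dx)
 *       sum += mask[dy + 2][dx + 2] *
 *              _smeminput[ty + dy + 2][tx + dx + 32];    // ty, tx: tile row/col
 *   iter[gy * iter_stride + gx] = (uchar)(sum + 0.5f);   // round to nearest
 *
 * where ty = threadIdx.y + k * blockDim.y and tx = threadIdx.x for the k-th
 * output row handled by this thread, and (gx, gy) is the corresponding global
 * output coordinate (gid_x, gid_y + k * blockDim.y).
 */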
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y59) * input_stride + _gid_x59); int _gid_x60 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y60 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x60 < 0) _gid_x60 = 0; if (_gid_y60 < 0) _gid_y60 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y60) * input_stride + _gid_x60); int _gid_x61 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y61 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x61 < 0) _gid_x61 = 0; if (_gid_y61 < 0) _gid_y61 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y61) * input_stride + _gid_x61); int _gid_x62 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y62 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x62 < 0) _gid_x62 = 0; if (_gid_y62 < 0) _gid_y62 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y62) * input_stride + _gid_x62); int _gid_x63 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y63 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x63 < 0) _gid_x63 = 0; if (_gid_y63 < 0) _gid_y63 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y63) * input_stride + _gid_x63); int _gid_x64 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y64 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x64 < 0) _gid_x64 = 0; if (_gid_y64 < 0) _gid_y64 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y64) * input_stride + _gid_x64); int _gid_x65 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y65 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x65 < 0) _gid_x65 = 0; if (_gid_y65 < 0) _gid_y65 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y65) * input_stride + _gid_x65); int _gid_x66 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y66 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x66 < 0) _gid_x66 = 0; if (_gid_y66 < 0) _gid_y66 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y66) * input_stride + _gid_x66); int _gid_x67 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y67 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x67 < 0) _gid_x67 = 0; if (_gid_y67 < 0) _gid_y67 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y67) * input_stride + _gid_x67); int _gid_x68 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y68 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x68 < 0) _gid_x68 = 0; if (_gid_y68 < 0) _gid_y68 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y68) * input_stride + _gid_x68); int _gid_x69 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y69 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x69 < 0) _gid_x69 = 0; if (_gid_y69 < 0) _gid_y69 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y69) * input_stride + _gid_x69); int _gid_x70 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y70 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x70 < 0) _gid_x70 = 0; if (_gid_y70 < 0) _gid_y70 = 0; _smeminput[(int)threadIdx.y + 8 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y70) * input_stride + _gid_x70); int _gid_x71 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y71 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x71 < 0) _gid_x71 = 0; if (_gid_y71 < 0) _gid_y71 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y71) * input_stride + _gid_x71); int _gid_x72 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y72 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x72 < 0) _gid_x72 = 0; if (_gid_y72 < 0) _gid_y72 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y72) * input_stride + _gid_x72); int _gid_x73 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y73 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x73 < 0) _gid_x73 = 0; if (_gid_y73 < 0) _gid_y73 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y73) * input_stride + _gid_x73); int _gid_x74 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y74 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x74 < 0) _gid_x74 = 0; if (_gid_y74 < 0) _gid_y74 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y74) * input_stride + _gid_x74); int _gid_x75 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y75 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x75 < 0) _gid_x75 = 0; if (_gid_y75 < 0) _gid_y75 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y75) * input_stride + _gid_x75); int _gid_x76 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y76 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x76 < 0) _gid_x76 = 0; if (_gid_y76 < 0) _gid_y76 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y76) * input_stride + _gid_x76); int _gid_x77 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y77 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x77 < 0) _gid_x77 = 0; if (_gid_y77 < 0) _gid_y77 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y77) * input_stride + _gid_x77); int _gid_x78 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y78 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x78 < 0) _gid_x78 = 0; if (_gid_y78 < 0) _gid_y78 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y78) * input_stride + _gid_x78); int _gid_x79 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y79 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x79 < 0) _gid_x79 = 0; if (_gid_y79 < 0) _gid_y79 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y79) * input_stride + _gid_x79); __syncthreads(); { float _tmp80 = 0.F; { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 
0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp80 + 0.5F); } { float _tmp81 = 0.F; { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp81 + 0.5F); } { float _tmp82 = 0.F; { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 
* (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp82 + 0.5F); } { float _tmp83 = 0.F; { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp83 + 0.5F); } { float _tmp84 = 0.F; { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp84 + 0.5F); } { float _tmp85 = 0.F; { 
_tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp85 + 0.5F); } { float _tmp86 = 0.F; { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp86 + 0.5F); } { float _tmp87 = 0.F; { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y 
+ 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp87 + 0.5F); } } goto BH_EXIT; BH_TR: { int _gid_x88 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y88 = gid_y + (-2); if (_gid_x88 >= input_width) _gid_x88 = input_width - 1; if (_gid_y88 < 0) _gid_y88 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y88) * input_stride + _gid_x88); int _gid_x89 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y89 = gid_y + (-2); if (_gid_x89 >= input_width) _gid_x89 = input_width - 1; if (_gid_y89 < 0) _gid_y89 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y89) * input_stride + _gid_x89); int _gid_x90 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y90 = gid_y + (-2); if (_gid_x90 >= input_width) _gid_x90 = input_width - 1; if (_gid_y90 < 0) _gid_y90 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y90) * input_stride + _gid_x90); int _gid_x91 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y91 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x91 >= input_width) _gid_x91 = input_width - 1; if (_gid_y91 < 0) _gid_y91 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y91) * input_stride + _gid_x91); int _gid_x92 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y92 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x92 >= input_width) _gid_x92 = input_width - 1; if (_gid_y92 < 0) _gid_y92 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x 
+ 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y92) * input_stride + _gid_x92); int _gid_x93 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y93 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x93 >= input_width) _gid_x93 = input_width - 1; if (_gid_y93 < 0) _gid_y93 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y93) * input_stride + _gid_x93); int _gid_x94 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y94 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x94 >= input_width) _gid_x94 = input_width - 1; if (_gid_y94 < 0) _gid_y94 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y94) * input_stride + _gid_x94); int _gid_x95 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y95 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x95 >= input_width) _gid_x95 = input_width - 1; if (_gid_y95 < 0) _gid_y95 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y95) * input_stride + _gid_x95); int _gid_x96 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y96 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x96 >= input_width) _gid_x96 = input_width - 1; if (_gid_y96 < 0) _gid_y96 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y96) * input_stride + _gid_x96); int _gid_x97 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y97 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x97 >= input_width) _gid_x97 = input_width - 1; if (_gid_y97 < 0) _gid_y97 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y97) * input_stride + _gid_x97); int _gid_x98 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y98 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x98 >= input_width) _gid_x98 = input_width - 1; if (_gid_y98 < 0) _gid_y98 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y98) * input_stride + _gid_x98); int _gid_x99 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y99 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x99 >= input_width) _gid_x99 = input_width - 1; if (_gid_y99 < 0) _gid_y99 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y99) * input_stride + _gid_x99); int _gid_x100 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y100 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x100 >= input_width) _gid_x100 = input_width - 1; if (_gid_y100 < 0) _gid_y100 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y100) * input_stride + _gid_x100); int _gid_x101 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y101 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x101 >= input_width) _gid_x101 = input_width - 1; if (_gid_y101 < 0) _gid_y101 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y101) * input_stride + _gid_x101); int _gid_x102 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y102 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x102 >= input_width) _gid_x102 = input_width - 1; if (_gid_y102 < 0) _gid_y102 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y102) * input_stride + 
_gid_x102); int _gid_x103 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y103 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x103 >= input_width) _gid_x103 = input_width - 1; if (_gid_y103 < 0) _gid_y103 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y103) * input_stride + _gid_x103); int _gid_x104 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y104 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x104 >= input_width) _gid_x104 = input_width - 1; if (_gid_y104 < 0) _gid_y104 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y104) * input_stride + _gid_x104); int _gid_x105 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y105 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x105 >= input_width) _gid_x105 = input_width - 1; if (_gid_y105 < 0) _gid_y105 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y105) * input_stride + _gid_x105); int _gid_x106 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y106 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x106 >= input_width) _gid_x106 = input_width - 1; if (_gid_y106 < 0) _gid_y106 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y106) * input_stride + _gid_x106); int _gid_x107 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y107 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x107 >= input_width) _gid_x107 = input_width - 1; if (_gid_y107 < 0) _gid_y107 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y107) * input_stride + _gid_x107); int _gid_x108 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y108 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x108 >= input_width) _gid_x108 = input_width - 1; if (_gid_y108 < 0) _gid_y108 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y108) * input_stride + _gid_x108); int _gid_x109 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y109 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x109 >= input_width) _gid_x109 = input_width - 1; if (_gid_y109 < 0) _gid_y109 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y109) * input_stride + _gid_x109); int _gid_x110 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y110 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x110 >= input_width) _gid_x110 = input_width - 1; if (_gid_y110 < 0) _gid_y110 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y110) * input_stride + _gid_x110); int _gid_x111 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y111 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x111 >= input_width) _gid_x111 = input_width - 1; if (_gid_y111 < 0) _gid_y111 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y111) * input_stride + _gid_x111); int _gid_x112 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y112 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x112 >= input_width) _gid_x112 = input_width - 1; if (_gid_y112 < 0) _gid_y112 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y112) * input_stride + _gid_x112); int 
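/* Descriptive note on the staging loads in this BH_TR branch (inferred from the
   clamping pattern, which suggests the top-right boundary case): each thread copies
   a (12 * blockDim.y) x (3 * blockDim.x) footprint of the input into _smeminput via
   the _texinputXY texture, clamping x to input_width - 1 at the right border and y
   to 0 at the top border. The x reads start 32 pixels to the left of the block's
   first column, matching the fixed "+ 32" column offset used when the stencil is
   applied after __syncthreads(). */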
_gid_x113 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y113 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x113 >= input_width) _gid_x113 = input_width - 1; if (_gid_y113 < 0) _gid_y113 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y113) * input_stride + _gid_x113); int _gid_x114 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y114 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x114 >= input_width) _gid_x114 = input_width - 1; if (_gid_y114 < 0) _gid_y114 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y114) * input_stride + _gid_x114); int _gid_x115 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y115 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x115 >= input_width) _gid_x115 = input_width - 1; if (_gid_y115 < 0) _gid_y115 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y115) * input_stride + _gid_x115); int _gid_x116 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y116 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x116 >= input_width) _gid_x116 = input_width - 1; if (_gid_y116 < 0) _gid_y116 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y116) * input_stride + _gid_x116); int _gid_x117 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y117 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x117 >= input_width) _gid_x117 = input_width - 1; if (_gid_y117 < 0) _gid_y117 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y117) * input_stride + _gid_x117); int _gid_x118 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y118 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x118 >= input_width) _gid_x118 = input_width - 1; if (_gid_y118 < 0) _gid_y118 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y118) * input_stride + _gid_x118); int _gid_x119 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y119 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x119 >= input_width) _gid_x119 = input_width - 1; if (_gid_y119 < 0) _gid_y119 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y119) * input_stride + _gid_x119); int _gid_x120 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y120 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x120 >= input_width) _gid_x120 = input_width - 1; if (_gid_y120 < 0) _gid_y120 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y120) * input_stride + _gid_x120); int _gid_x121 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y121 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x121 >= input_width) _gid_x121 = input_width - 1; if (_gid_y121 < 0) _gid_y121 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y121) * input_stride + _gid_x121); int _gid_x122 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y122 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x122 >= input_width) _gid_x122 = input_width - 1; if (_gid_y122 < 0) _gid_y122 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y122) * input_stride + _gid_x122); int _gid_x123 = 
gid_x + 2 * (int)blockDim.x - 32; int _gid_y123 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x123 >= input_width) _gid_x123 = input_width - 1; if (_gid_y123 < 0) _gid_y123 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y123) * input_stride + _gid_x123); __syncthreads(); if (gid_x < iter_width) { { float _tmp124 = 0.F; { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp124 + 0.5F); } } if (gid_x < iter_width) { { float _tmp125 = 0.F; { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 
0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp125 + 0.5F); } } if (gid_x < iter_width) { { float _tmp126 = 0.F; { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp126 + 0.5F); } } if (gid_x < iter_width) { { float _tmp127 = 0.F; { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.136565F * _smeminput[(int)threadIdx.y + 3 
* (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp127 + 0.5F); } } if (gid_x < iter_width) { { float _tmp128 = 0.F; { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp128 + 0.5F); } } if (gid_x < iter_width) { { float _tmp129 = 0.F; { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { 
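/* Each brace-delimited statement in these unrolled blocks accumulates one tap of a
   5x5 smoothing stencil over the staged tile, using the Gaussian-like weights
   0.00500799995F, 0.0173000004F, 0.0261509996F, 0.0597609989F, 0.0903389975F and
   0.136565F (centre); the sum is rounded to uchar via "+ 0.5F" before being stored.
   A compact, hypothetical equivalent of one unrolled output (the names gauss5x5 and
   k are illustrative only and do not exist in this file):

     float acc = 0.0F;
     for (int dy = -2; dy <= 2; ++dy)
       for (int dx = -2; dx <= 2; ++dx)
         acc += gauss5x5[dy + 2][dx + 2] *
                _smeminput[(int)threadIdx.y + k * (int)blockDim.y + dy + 2]
                          [(int)threadIdx.x + dx + 32];
     iter[(gid_y + k * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(acc + 0.5F);

   where k in 0..7 selects which of the eight output rows this thread writes. */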
_tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp129 + 0.5F); } } if (gid_x < iter_width) { { float _tmp130 = 0.F; { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp130 + 0.5F); } } if (gid_x < iter_width) { { float _tmp131 = 0.F; { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp131 + 0.5F); } } } goto BH_EXIT; BH_T: { int _gid_x132 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y132 = gid_y + (-2); if (_gid_y132 < 0) _gid_y132 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * 
(int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y132) * input_stride + _gid_x132); int _gid_x133 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y133 = gid_y + (-2); if (_gid_y133 < 0) _gid_y133 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y133) * input_stride + _gid_x133); int _gid_x134 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y134 = gid_y + (-2); if (_gid_y134 < 0) _gid_y134 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y134) * input_stride + _gid_x134); int _gid_x135 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y135 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y135 < 0) _gid_y135 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y135) * input_stride + _gid_x135); int _gid_x136 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y136 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y136 < 0) _gid_y136 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y136) * input_stride + _gid_x136); int _gid_x137 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y137 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y137 < 0) _gid_y137 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y137) * input_stride + _gid_x137); int _gid_x138 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y138 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y138 < 0) _gid_y138 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y138) * input_stride + _gid_x138); int _gid_x139 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y139 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y139 < 0) _gid_y139 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y139) * input_stride + _gid_x139); int _gid_x140 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y140 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y140 < 0) _gid_y140 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y140) * input_stride + _gid_x140); int _gid_x141 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y141 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y141 < 0) _gid_y141 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y141) * input_stride + _gid_x141); int _gid_x142 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y142 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y142 < 0) _gid_y142 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y142) * input_stride + _gid_x142); int _gid_x143 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y143 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y143 < 0) _gid_y143 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y143) * input_stride + _gid_x143); int _gid_x144 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y144 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y144 < 0) _gid_y144 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y144) * input_stride + _gid_x144); int _gid_x145 = gid_x + 1 * 
(int)blockDim.x - 32; int _gid_y145 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y145 < 0) _gid_y145 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y145) * input_stride + _gid_x145); int _gid_x146 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y146 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y146 < 0) _gid_y146 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y146) * input_stride + _gid_x146); int _gid_x147 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y147 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y147 < 0) _gid_y147 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y147) * input_stride + _gid_x147); int _gid_x148 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y148 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y148 < 0) _gid_y148 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y148) * input_stride + _gid_x148); int _gid_x149 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y149 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y149 < 0) _gid_y149 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y149) * input_stride + _gid_x149); int _gid_x150 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y150 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y150 < 0) _gid_y150 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y150) * input_stride + _gid_x150); int _gid_x151 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y151 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y151 < 0) _gid_y151 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y151) * input_stride + _gid_x151); int _gid_x152 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y152 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y152 < 0) _gid_y152 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y152) * input_stride + _gid_x152); int _gid_x153 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y153 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y153 < 0) _gid_y153 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y153) * input_stride + _gid_x153); int _gid_x154 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y154 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y154 < 0) _gid_y154 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y154) * input_stride + _gid_x154); int _gid_x155 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y155 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y155 < 0) _gid_y155 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y155) * input_stride + _gid_x155); int _gid_x156 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y156 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y156 < 0) _gid_y156 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y156) * input_stride + _gid_x156); int _gid_x157 = gid_x + 1 * (int)blockDim.x - 32; int 
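/* Descriptive note on this BH_T branch: only the y coordinate is clamped to 0 here
   (top border), while x is read unclamped, starting 32 pixels to the left of the
   block to fill the horizontal halo columns of _smeminput; the compute phase below
   therefore omits the "gid_x < iter_width" guard used in the corner cases. */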
_gid_y157 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y157 < 0) _gid_y157 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y157) * input_stride + _gid_x157); int _gid_x158 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y158 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y158 < 0) _gid_y158 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y158) * input_stride + _gid_x158); int _gid_x159 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y159 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y159 < 0) _gid_y159 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y159) * input_stride + _gid_x159); int _gid_x160 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y160 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y160 < 0) _gid_y160 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y160) * input_stride + _gid_x160); int _gid_x161 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y161 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y161 < 0) _gid_y161 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y161) * input_stride + _gid_x161); int _gid_x162 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y162 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y162 < 0) _gid_y162 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y162) * input_stride + _gid_x162); int _gid_x163 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y163 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y163 < 0) _gid_y163 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y163) * input_stride + _gid_x163); int _gid_x164 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y164 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y164 < 0) _gid_y164 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y164) * input_stride + _gid_x164); int _gid_x165 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y165 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y165 < 0) _gid_y165 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y165) * input_stride + _gid_x165); int _gid_x166 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y166 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y166 < 0) _gid_y166 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y166) * input_stride + _gid_x166); int _gid_x167 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y167 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y167 < 0) _gid_y167 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y167) * input_stride + _gid_x167); __syncthreads(); { float _tmp168 = 0.F; { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp168 + 0.5F); } { float _tmp169 = 0.F; { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp169 + 0.5F); } { float _tmp170 = 0.F; { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp170 + 0.5F); } { float _tmp171 = 0.F; { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp171 + 0.5F); } { float _tmp172 = 0.F; { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp172 + 0.5F); } { float _tmp173 = 0.F; { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp173 + 0.5F); } { float _tmp174 = 0.F; { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp174 + 0.5F); } { float _tmp175 = 0.F; { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 
0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp175 + 0.5F); } } goto BH_EXIT; BH_BL: { int _gid_x176 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y176 = gid_y + (-2); if (_gid_y176 >= input_height) _gid_y176 = input_height - 1; if (_gid_x176 < 0) _gid_x176 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y176) * input_stride + _gid_x176); int _gid_x177 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y177 = gid_y + (-2); if (_gid_y177 >= input_height) _gid_y177 = input_height - 1; if (_gid_x177 < 0) _gid_x177 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y177) * input_stride + _gid_x177); int _gid_x178 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y178 = gid_y + (-2); if (_gid_y178 >= input_height) _gid_y178 = input_height - 1; if (_gid_x178 < 0) _gid_x178 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y178) * input_stride + _gid_x178); int _gid_x179 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y179 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y179 >= input_height) _gid_y179 = 
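// BH_BL: boundary handling for tiles at the bottom-left of the image (judging by the
// clamps): the shared tile is staged from _texinputXY with negative x clamped to column 0
// and y >= input_height clamped to the last row, i.e. clamp-to-edge addressing on the
// left and bottom borders.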
input_height - 1; if (_gid_x179 < 0) _gid_x179 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y179) * input_stride + _gid_x179); int _gid_x180 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y180 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y180 >= input_height) _gid_y180 = input_height - 1; if (_gid_x180 < 0) _gid_x180 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y180) * input_stride + _gid_x180); int _gid_x181 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y181 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y181 >= input_height) _gid_y181 = input_height - 1; if (_gid_x181 < 0) _gid_x181 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y181) * input_stride + _gid_x181); int _gid_x182 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y182 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y182 >= input_height) _gid_y182 = input_height - 1; if (_gid_x182 < 0) _gid_x182 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y182) * input_stride + _gid_x182); int _gid_x183 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y183 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y183 >= input_height) _gid_y183 = input_height - 1; if (_gid_x183 < 0) _gid_x183 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y183) * input_stride + _gid_x183); int _gid_x184 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y184 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y184 >= input_height) _gid_y184 = input_height - 1; if (_gid_x184 < 0) _gid_x184 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y184) * input_stride + _gid_x184); int _gid_x185 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y185 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y185 >= input_height) _gid_y185 = input_height - 1; if (_gid_x185 < 0) _gid_x185 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y185) * input_stride + _gid_x185); int _gid_x186 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y186 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y186 >= input_height) _gid_y186 = input_height - 1; if (_gid_x186 < 0) _gid_x186 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y186) * input_stride + _gid_x186); int _gid_x187 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y187 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y187 >= input_height) _gid_y187 = input_height - 1; if (_gid_x187 < 0) _gid_x187 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y187) * input_stride + _gid_x187); int _gid_x188 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y188 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y188 >= input_height) _gid_y188 = input_height - 1; if (_gid_x188 < 0) _gid_x188 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y188) * input_stride + _gid_x188); int _gid_x189 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y189 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y189 >= input_height) _gid_y189 = 
input_height - 1; if (_gid_x189 < 0) _gid_x189 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y189) * input_stride + _gid_x189); int _gid_x190 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y190 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y190 >= input_height) _gid_y190 = input_height - 1; if (_gid_x190 < 0) _gid_x190 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y190) * input_stride + _gid_x190); int _gid_x191 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y191 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y191 >= input_height) _gid_y191 = input_height - 1; if (_gid_x191 < 0) _gid_x191 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y191) * input_stride + _gid_x191); int _gid_x192 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y192 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y192 >= input_height) _gid_y192 = input_height - 1; if (_gid_x192 < 0) _gid_x192 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y192) * input_stride + _gid_x192); int _gid_x193 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y193 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y193 >= input_height) _gid_y193 = input_height - 1; if (_gid_x193 < 0) _gid_x193 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y193) * input_stride + _gid_x193); int _gid_x194 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y194 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y194 >= input_height) _gid_y194 = input_height - 1; if (_gid_x194 < 0) _gid_x194 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y194) * input_stride + _gid_x194); int _gid_x195 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y195 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y195 >= input_height) _gid_y195 = input_height - 1; if (_gid_x195 < 0) _gid_x195 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y195) * input_stride + _gid_x195); int _gid_x196 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y196 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y196 >= input_height) _gid_y196 = input_height - 1; if (_gid_x196 < 0) _gid_x196 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y196) * input_stride + _gid_x196); int _gid_x197 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y197 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y197 >= input_height) _gid_y197 = input_height - 1; if (_gid_x197 < 0) _gid_x197 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y197) * input_stride + _gid_x197); int _gid_x198 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y198 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y198 >= input_height) _gid_y198 = input_height - 1; if (_gid_x198 < 0) _gid_x198 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y198) * input_stride + _gid_x198); int _gid_x199 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y199 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y199 >= input_height) _gid_y199 = 
input_height - 1; if (_gid_x199 < 0) _gid_x199 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y199) * input_stride + _gid_x199); int _gid_x200 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y200 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y200 >= input_height) _gid_y200 = input_height - 1; if (_gid_x200 < 0) _gid_x200 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y200) * input_stride + _gid_x200); int _gid_x201 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y201 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y201 >= input_height) _gid_y201 = input_height - 1; if (_gid_x201 < 0) _gid_x201 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y201) * input_stride + _gid_x201); int _gid_x202 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y202 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y202 >= input_height) _gid_y202 = input_height - 1; if (_gid_x202 < 0) _gid_x202 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y202) * input_stride + _gid_x202); int _gid_x203 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y203 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y203 >= input_height) _gid_y203 = input_height - 1; if (_gid_x203 < 0) _gid_x203 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y203) * input_stride + _gid_x203); int _gid_x204 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y204 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y204 >= input_height) _gid_y204 = input_height - 1; if (_gid_x204 < 0) _gid_x204 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y204) * input_stride + _gid_x204); int _gid_x205 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y205 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y205 >= input_height) _gid_y205 = input_height - 1; if (_gid_x205 < 0) _gid_x205 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y205) * input_stride + _gid_x205); int _gid_x206 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y206 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y206 >= input_height) _gid_y206 = input_height - 1; if (_gid_x206 < 0) _gid_x206 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y206) * input_stride + _gid_x206); int _gid_x207 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y207 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y207 >= input_height) _gid_y207 = input_height - 1; if (_gid_x207 < 0) _gid_x207 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y207) * input_stride + _gid_x207); int _gid_x208 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y208 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y208 >= input_height) _gid_y208 = input_height - 1; if (_gid_x208 < 0) _gid_x208 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y208) * input_stride + _gid_x208); int _gid_x209 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y209 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y209 >= input_height) _gid_y209 
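// Once the last halo rows are staged, the __syncthreads() below publishes the shared tile
// to the whole block; the filter is then applied with a per-row guard
// (gid_y + k * blockDim.y < iter_height) before each store to iter.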
= input_height - 1; if (_gid_x209 < 0) _gid_x209 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y209) * input_stride + _gid_x209); int _gid_x210 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y210 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y210 >= input_height) _gid_y210 = input_height - 1; if (_gid_x210 < 0) _gid_x210 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y210) * input_stride + _gid_x210); int _gid_x211 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y211 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y211 >= input_height) _gid_y211 = input_height - 1; if (_gid_x211 < 0) _gid_x211 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y211) * input_stride + _gid_x211); __syncthreads(); if (gid_y < iter_height) { float _tmp212 = 0.F; { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp212 + 0.5F); } if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp213 = 0.F; { 
_tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp213 + 0.5F); } if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp214 = 0.F; { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * 
(int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp214 + 0.5F); } if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp215 = 0.F; { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 
+= 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp215 + 0.5F); } if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp216 = 0.F; { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 
0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp216 + 0.5F); } if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp217 = 0.F; { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp217 + 0.5F); } if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp218 = 0.F; { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 
2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp218 + 0.5F); } if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp219 = 0.F; { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp219 + 0.5F); } } goto BH_EXIT; BH_BR: { int _gid_x220 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y220 = gid_y + (-2); if (_gid_x220 >= input_width) _gid_x220 = input_width - 1; if (_gid_y220 >= input_height) _gid_y220 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y220) * input_stride + _gid_x220); int _gid_x221 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y221 = gid_y + (-2); if (_gid_x221 >= input_width) _gid_x221 = input_width - 1; if (_gid_y221 >= input_height) _gid_y221 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y221) * input_stride + _gid_x221); int _gid_x222 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y222 = gid_y + (-2); if (_gid_x222 >= input_width) _gid_x222 = input_width - 1; if (_gid_y222 >= input_height) _gid_y222 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y222) * input_stride + _gid_x222); int _gid_x223 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y223 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x223 >= input_width) _gid_x223 = input_width - 1; if (_gid_y223 >= input_height) _gid_y223 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y223) * input_stride + _gid_x223); int _gid_x224 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y224 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x224 >= input_width) _gid_x224 = input_width - 1; if (_gid_y224 >= input_height) _gid_y224 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y224) * input_stride + _gid_x224); int _gid_x225 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y225 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x225 >= input_width) _gid_x225 = input_width - 1; if (_gid_y225 >= input_height) _gid_y225 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y225) * input_stride + _gid_x225); int _gid_x226 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y226 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x226 >= input_width) _gid_x226 = input_width - 1; if (_gid_y226 >= input_height) _gid_y226 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y226) * input_stride + _gid_x226); int _gid_x227 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y227 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x227 >= input_width) _gid_x227 = input_width - 1; if (_gid_y227 >= input_height) _gid_y227 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y227) * input_stride + _gid_x227); int _gid_x228 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y228 = gid_y + 2 * 
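// BH_BR: boundary handling for tiles at the bottom-right of the image: staging clamps
// x >= input_width to the last column and y >= input_height to the last row
// (clamp-to-edge on the right and bottom borders).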
(int)blockDim.y + (-2); if (_gid_x228 >= input_width) _gid_x228 = input_width - 1; if (_gid_y228 >= input_height) _gid_y228 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y228) * input_stride + _gid_x228); int _gid_x229 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y229 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x229 >= input_width) _gid_x229 = input_width - 1; if (_gid_y229 >= input_height) _gid_y229 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y229) * input_stride + _gid_x229); int _gid_x230 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y230 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x230 >= input_width) _gid_x230 = input_width - 1; if (_gid_y230 >= input_height) _gid_y230 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y230) * input_stride + _gid_x230); int _gid_x231 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y231 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x231 >= input_width) _gid_x231 = input_width - 1; if (_gid_y231 >= input_height) _gid_y231 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y231) * input_stride + _gid_x231); int _gid_x232 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y232 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x232 >= input_width) _gid_x232 = input_width - 1; if (_gid_y232 >= input_height) _gid_y232 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y232) * input_stride + _gid_x232); int _gid_x233 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y233 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x233 >= input_width) _gid_x233 = input_width - 1; if (_gid_y233 >= input_height) _gid_y233 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y233) * input_stride + _gid_x233); int _gid_x234 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y234 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x234 >= input_width) _gid_x234 = input_width - 1; if (_gid_y234 >= input_height) _gid_y234 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y234) * input_stride + _gid_x234); int _gid_x235 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y235 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x235 >= input_width) _gid_x235 = input_width - 1; if (_gid_y235 >= input_height) _gid_y235 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y235) * input_stride + _gid_x235); int _gid_x236 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y236 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x236 >= input_width) _gid_x236 = input_width - 1; if (_gid_y236 >= input_height) _gid_y236 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y236) * input_stride + _gid_x236); int _gid_x237 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y237 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x237 >= input_width) _gid_x237 = input_width - 1; if (_gid_y237 >= input_height) _gid_y237 = 
input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y237) * input_stride + _gid_x237); int _gid_x238 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y238 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x238 >= input_width) _gid_x238 = input_width - 1; if (_gid_y238 >= input_height) _gid_y238 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y238) * input_stride + _gid_x238); int _gid_x239 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y239 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x239 >= input_width) _gid_x239 = input_width - 1; if (_gid_y239 >= input_height) _gid_y239 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y239) * input_stride + _gid_x239); int _gid_x240 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y240 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x240 >= input_width) _gid_x240 = input_width - 1; if (_gid_y240 >= input_height) _gid_y240 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y240) * input_stride + _gid_x240); int _gid_x241 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y241 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x241 >= input_width) _gid_x241 = input_width - 1; if (_gid_y241 >= input_height) _gid_y241 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y241) * input_stride + _gid_x241); int _gid_x242 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y242 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x242 >= input_width) _gid_x242 = input_width - 1; if (_gid_y242 >= input_height) _gid_y242 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y242) * input_stride + _gid_x242); int _gid_x243 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y243 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x243 >= input_width) _gid_x243 = input_width - 1; if (_gid_y243 >= input_height) _gid_y243 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y243) * input_stride + _gid_x243); int _gid_x244 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y244 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x244 >= input_width) _gid_x244 = input_width - 1; if (_gid_y244 >= input_height) _gid_y244 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y244) * input_stride + _gid_x244); int _gid_x245 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y245 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x245 >= input_width) _gid_x245 = input_width - 1; if (_gid_y245 >= input_height) _gid_y245 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y245) * input_stride + _gid_x245); int _gid_x246 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y246 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x246 >= input_width) _gid_x246 = input_width - 1; if (_gid_y246 >= input_height) _gid_y246 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, 
(_gid_y246) * input_stride + _gid_x246); int _gid_x247 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y247 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x247 >= input_width) _gid_x247 = input_width - 1; if (_gid_y247 >= input_height) _gid_y247 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y247) * input_stride + _gid_x247); int _gid_x248 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y248 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x248 >= input_width) _gid_x248 = input_width - 1; if (_gid_y248 >= input_height) _gid_y248 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y248) * input_stride + _gid_x248); int _gid_x249 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y249 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x249 >= input_width) _gid_x249 = input_width - 1; if (_gid_y249 >= input_height) _gid_y249 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y249) * input_stride + _gid_x249); int _gid_x250 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y250 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x250 >= input_width) _gid_x250 = input_width - 1; if (_gid_y250 >= input_height) _gid_y250 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y250) * input_stride + _gid_x250); int _gid_x251 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y251 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x251 >= input_width) _gid_x251 = input_width - 1; if (_gid_y251 >= input_height) _gid_y251 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y251) * input_stride + _gid_x251); int _gid_x252 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y252 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x252 >= input_width) _gid_x252 = input_width - 1; if (_gid_y252 >= input_height) _gid_y252 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y252) * input_stride + _gid_x252); int _gid_x253 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y253 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x253 >= input_width) _gid_x253 = input_width - 1; if (_gid_y253 >= input_height) _gid_y253 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y253) * input_stride + _gid_x253); int _gid_x254 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y254 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x254 >= input_width) _gid_x254 = input_width - 1; if (_gid_y254 >= input_height) _gid_y254 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y254) * input_stride + _gid_x254); int _gid_x255 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y255 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x255 >= input_width) _gid_x255 = input_width - 1; if (_gid_y255 >= input_height) _gid_y255 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y255) * input_stride + _gid_x255); __syncthreads(); if (gid_x < iter_width) { if (gid_y < iter_height) { float 
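// In this corner case every store is additionally guarded by gid_x < iter_width
// on top of the per-row gid_y bound used in the other boundary cases.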
_tmp256 = 0.F; { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp256 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp257 = 0.F; { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; 
} { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp257 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp258 = 0.F; { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp258 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp259 = 0.F; { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * 
(int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp259 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp260 = 0.F; { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 
0 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp260 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp261 = 0.F; { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp261 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp262 = 0.F; { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * 
(int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp262 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp263 = 0.F; { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp263 + 0.5F); } } } goto BH_EXIT; BH_B: { int _gid_x264 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y264 = gid_y + (-2); if (_gid_y264 >= input_height) _gid_y264 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = 
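// BH_B (bottom border) variant: the loads in this block clamp only the y index to
// input_height - 1 (clamp-to-edge); this region is interior in x, so no x clamping is needed.
// Each labelled border-handling block jumps to BH_EXIT once its outputs have been written.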
tex1Dfetch(_texinputXY, (_gid_y264) * input_stride + _gid_x264); int _gid_x265 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y265 = gid_y + (-2); if (_gid_y265 >= input_height) _gid_y265 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y265) * input_stride + _gid_x265); int _gid_x266 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y266 = gid_y + (-2); if (_gid_y266 >= input_height) _gid_y266 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y266) * input_stride + _gid_x266); int _gid_x267 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y267 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y267 >= input_height) _gid_y267 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y267) * input_stride + _gid_x267); int _gid_x268 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y268 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y268 >= input_height) _gid_y268 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y268) * input_stride + _gid_x268); int _gid_x269 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y269 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y269 >= input_height) _gid_y269 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y269) * input_stride + _gid_x269); int _gid_x270 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y270 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y270 >= input_height) _gid_y270 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y270) * input_stride + _gid_x270); int _gid_x271 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y271 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y271 >= input_height) _gid_y271 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y271) * input_stride + _gid_x271); int _gid_x272 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y272 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y272 >= input_height) _gid_y272 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y272) * input_stride + _gid_x272); int _gid_x273 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y273 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y273 >= input_height) _gid_y273 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y273) * input_stride + _gid_x273); int _gid_x274 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y274 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y274 >= input_height) _gid_y274 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y274) * input_stride + _gid_x274); int _gid_x275 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y275 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y275 >= input_height) _gid_y275 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y275) * input_stride + _gid_x275); int _gid_x276 = gid_x + 0 * (int)blockDim.x - 
32; int _gid_y276 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y276 >= input_height) _gid_y276 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y276) * input_stride + _gid_x276); int _gid_x277 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y277 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y277 >= input_height) _gid_y277 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y277) * input_stride + _gid_x277); int _gid_x278 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y278 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y278 >= input_height) _gid_y278 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y278) * input_stride + _gid_x278); int _gid_x279 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y279 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y279 >= input_height) _gid_y279 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y279) * input_stride + _gid_x279); int _gid_x280 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y280 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y280 >= input_height) _gid_y280 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y280) * input_stride + _gid_x280); int _gid_x281 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y281 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y281 >= input_height) _gid_y281 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y281) * input_stride + _gid_x281); int _gid_x282 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y282 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y282 >= input_height) _gid_y282 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y282) * input_stride + _gid_x282); int _gid_x283 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y283 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y283 >= input_height) _gid_y283 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y283) * input_stride + _gid_x283); int _gid_x284 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y284 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y284 >= input_height) _gid_y284 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y284) * input_stride + _gid_x284); int _gid_x285 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y285 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y285 >= input_height) _gid_y285 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y285) * input_stride + _gid_x285); int _gid_x286 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y286 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y286 >= input_height) _gid_y286 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y286) * input_stride + _gid_x286); int _gid_x287 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y287 = 
gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y287 >= input_height) _gid_y287 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y287) * input_stride + _gid_x287); int _gid_x288 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y288 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y288 >= input_height) _gid_y288 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y288) * input_stride + _gid_x288); int _gid_x289 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y289 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y289 >= input_height) _gid_y289 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y289) * input_stride + _gid_x289); int _gid_x290 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y290 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y290 >= input_height) _gid_y290 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y290) * input_stride + _gid_x290); int _gid_x291 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y291 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y291 >= input_height) _gid_y291 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y291) * input_stride + _gid_x291); int _gid_x292 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y292 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y292 >= input_height) _gid_y292 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y292) * input_stride + _gid_x292); int _gid_x293 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y293 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y293 >= input_height) _gid_y293 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y293) * input_stride + _gid_x293); int _gid_x294 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y294 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y294 >= input_height) _gid_y294 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y294) * input_stride + _gid_x294); int _gid_x295 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y295 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y295 >= input_height) _gid_y295 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y295) * input_stride + _gid_x295); int _gid_x296 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y296 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y296 >= input_height) _gid_y296 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y296) * input_stride + _gid_x296); int _gid_x297 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y297 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y297 >= input_height) _gid_y297 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y297) * input_stride + _gid_x297); int _gid_x298 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y298 = gid_y + 11 * 
(int)blockDim.y + (-2); if (_gid_y298 >= input_height) _gid_y298 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y298) * input_stride + _gid_x298); int _gid_x299 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y299 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y299 >= input_height) _gid_y299 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y299) * input_stride + _gid_x299); __syncthreads(); if (gid_y < iter_height) { float _tmp300 = 0.F; { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp300 + 0.5F); } if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp301 = 0.F; { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { 
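// Same unrolled 5x5 Gaussian accumulation as in the corner case above; for this bottom-border
// region only the y extent (gid_y + k * blockDim.y < iter_height) is guarded, since x is interior.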
_tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp301 + 0.5F); } if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp302 = 0.F; { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * 
(int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp302 + 0.5F); } if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp303 = 0.F; { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 
0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp303 + 0.5F); } if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp304 = 0.F; { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp304 + 0.5F); } if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp305 = 0.F; { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp305 + 0.5F); } if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp306 = 0.F; { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp306 + 0.5F); } if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp307 = 0.F; { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp307 + 0.5F); } } goto BH_EXIT; BH_R: { int _gid_x308 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y308 = gid_y + (-2); if (_gid_x308 >= input_width) _gid_x308 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y308) * input_stride + _gid_x308); int _gid_x309 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y309 = gid_y + (-2); if (_gid_x309 >= input_width) _gid_x309 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y309) * input_stride + _gid_x309); int _gid_x310 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y310 = gid_y + (-2); if (_gid_x310 >= input_width) _gid_x310 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y310) * input_stride + _gid_x310); int _gid_x311 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y311 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x311 >= input_width) _gid_x311 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y311) * input_stride + _gid_x311); int _gid_x312 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y312 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x312 >= input_width) _gid_x312 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y312) * input_stride + _gid_x312); int _gid_x313 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y313 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x313 >= input_width) _gid_x313 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y313) * input_stride + _gid_x313); int _gid_x314 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y314 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x314 >= input_width) _gid_x314 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y314) * input_stride + _gid_x314); int _gid_x315 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y315 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x315 >= input_width) _gid_x315 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y315) * input_stride + _gid_x315); int _gid_x316 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y316 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x316 >= input_width) _gid_x316 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y316) * input_stride + _gid_x316); int _gid_x317 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y317 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x317 >= input_width) _gid_x317 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y317) * input_stride + _gid_x317); int _gid_x318 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y318 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x318 >= input_width) _gid_x318 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y318) * 
input_stride + _gid_x318); int _gid_x319 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y319 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x319 >= input_width) _gid_x319 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y319) * input_stride + _gid_x319); int _gid_x320 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y320 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x320 >= input_width) _gid_x320 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y320) * input_stride + _gid_x320); int _gid_x321 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y321 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x321 >= input_width) _gid_x321 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y321) * input_stride + _gid_x321); int _gid_x322 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y322 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x322 >= input_width) _gid_x322 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y322) * input_stride + _gid_x322); int _gid_x323 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y323 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x323 >= input_width) _gid_x323 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y323) * input_stride + _gid_x323); int _gid_x324 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y324 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x324 >= input_width) _gid_x324 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y324) * input_stride + _gid_x324); int _gid_x325 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y325 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x325 >= input_width) _gid_x325 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y325) * input_stride + _gid_x325); int _gid_x326 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y326 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x326 >= input_width) _gid_x326 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y326) * input_stride + _gid_x326); int _gid_x327 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y327 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x327 >= input_width) _gid_x327 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y327) * input_stride + _gid_x327); int _gid_x328 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y328 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x328 >= input_width) _gid_x328 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y328) * input_stride + _gid_x328); int _gid_x329 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y329 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x329 >= input_width) _gid_x329 = input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y329) * input_stride + _gid_x329); int _gid_x330 = 
gid_x + 1 * (int)blockDim.x - 32; int _gid_y330 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x330 >= input_width) _gid_x330 = input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y330) * input_stride + _gid_x330); int _gid_x331 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y331 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x331 >= input_width) _gid_x331 = input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y331) * input_stride + _gid_x331); int _gid_x332 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y332 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x332 >= input_width) _gid_x332 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y332) * input_stride + _gid_x332); int _gid_x333 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y333 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x333 >= input_width) _gid_x333 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y333) * input_stride + _gid_x333); int _gid_x334 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y334 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x334 >= input_width) _gid_x334 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y334) * input_stride + _gid_x334); int _gid_x335 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y335 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x335 >= input_width) _gid_x335 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y335) * input_stride + _gid_x335); int _gid_x336 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y336 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x336 >= input_width) _gid_x336 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y336) * input_stride + _gid_x336); int _gid_x337 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y337 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x337 >= input_width) _gid_x337 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y337) * input_stride + _gid_x337); int _gid_x338 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y338 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x338 >= input_width) _gid_x338 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y338) * input_stride + _gid_x338); int _gid_x339 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y339 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x339 >= input_width) _gid_x339 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y339) * input_stride + _gid_x339); int _gid_x340 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y340 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x340 >= input_width) _gid_x340 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y340) * input_stride + _gid_x340); int _gid_x341 = gid_x + 0 * (int)blockDim.x - 32; int 
_gid_y341 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x341 >= input_width) _gid_x341 = input_width - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y341) * input_stride + _gid_x341); int _gid_x342 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y342 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x342 >= input_width) _gid_x342 = input_width - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y342) * input_stride + _gid_x342); int _gid_x343 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y343 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x343 >= input_width) _gid_x343 = input_width - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y343) * input_stride + _gid_x343); __syncthreads(); if (gid_x < iter_width) { { float _tmp344 = 0.F; { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp344 + 0.5F); } } if (gid_x < iter_width) { { float _tmp345 = 0.F; { _tmp345 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp345 + 0.5F); } } if (gid_x < iter_width) { { float _tmp346 = 0.F; { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; 
} { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp346 + 0.5F); } } if (gid_x < iter_width) { { float _tmp347 = 0.F; { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 
+ 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp347 + 0.5F); } } if (gid_x < iter_width) { { float _tmp348 = 0.F; { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp348 + 0.5F); } } if (gid_x < iter_width) { { float _tmp349 = 0.F; { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { 
_tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp349 + 0.5F); } } if (gid_x < iter_width) { { float _tmp350 = 0.F; { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp350 + 0.5F); } } if (gid_x < iter_width) { { float _tmp351 = 0.F; { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y 
+ 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp351 + 0.5F); } } } goto BH_EXIT; BH_L: { int _gid_x352 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y352 = gid_y + (-2); if (_gid_x352 < 0) _gid_x352 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y352) * input_stride + _gid_x352); int _gid_x353 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y353 = gid_y + (-2); if (_gid_x353 < 0) _gid_x353 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y353) * input_stride + _gid_x353); int _gid_x354 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y354 = gid_y + (-2); if (_gid_x354 < 0) _gid_x354 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y354) * input_stride + _gid_x354); int _gid_x355 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y355 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x355 < 0) _gid_x355 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y355) * input_stride + _gid_x355); int _gid_x356 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y356 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x356 < 0) _gid_x356 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y356) * input_stride + _gid_x356); int _gid_x357 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y357 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x357 < 0) _gid_x357 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y357) * input_stride + _gid_x357); int _gid_x358 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y358 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x358 < 0) _gid_x358 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y358) * input_stride + _gid_x358); int _gid_x359 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y359 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x359 < 0) _gid_x359 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y359) * input_stride + _gid_x359); int _gid_x360 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y360 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x360 < 0) _gid_x360 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y360) * input_stride + _gid_x360); int _gid_x361 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y361 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x361 < 0) _gid_x361 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y361) * input_stride + _gid_x361); int _gid_x362 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y362 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x362 < 0) _gid_x362 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y362) * input_stride + _gid_x362); int _gid_x363 = gid_x + 2 * 
(int)blockDim.x - 32; int _gid_y363 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x363 < 0) _gid_x363 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y363) * input_stride + _gid_x363); int _gid_x364 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y364 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x364 < 0) _gid_x364 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y364) * input_stride + _gid_x364); int _gid_x365 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y365 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x365 < 0) _gid_x365 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y365) * input_stride + _gid_x365); int _gid_x366 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y366 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x366 < 0) _gid_x366 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y366) * input_stride + _gid_x366); int _gid_x367 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y367 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x367 < 0) _gid_x367 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y367) * input_stride + _gid_x367); int _gid_x368 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y368 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x368 < 0) _gid_x368 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y368) * input_stride + _gid_x368); int _gid_x369 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y369 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x369 < 0) _gid_x369 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y369) * input_stride + _gid_x369); int _gid_x370 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y370 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x370 < 0) _gid_x370 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y370) * input_stride + _gid_x370); int _gid_x371 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y371 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x371 < 0) _gid_x371 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y371) * input_stride + _gid_x371); int _gid_x372 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y372 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x372 < 0) _gid_x372 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y372) * input_stride + _gid_x372); int _gid_x373 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y373 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x373 < 0) _gid_x373 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y373) * input_stride + _gid_x373); int _gid_x374 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y374 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x374 < 0) _gid_x374 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y374) * input_stride + _gid_x374); int _gid_x375 = gid_x + 2 * (int)blockDim.x - 32; int 
_gid_y375 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x375 < 0) _gid_x375 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y375) * input_stride + _gid_x375); int _gid_x376 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y376 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x376 < 0) _gid_x376 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y376) * input_stride + _gid_x376); int _gid_x377 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y377 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x377 < 0) _gid_x377 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y377) * input_stride + _gid_x377); int _gid_x378 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y378 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x378 < 0) _gid_x378 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y378) * input_stride + _gid_x378); int _gid_x379 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y379 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x379 < 0) _gid_x379 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y379) * input_stride + _gid_x379); int _gid_x380 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y380 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x380 < 0) _gid_x380 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y380) * input_stride + _gid_x380); int _gid_x381 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y381 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x381 < 0) _gid_x381 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y381) * input_stride + _gid_x381); int _gid_x382 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y382 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x382 < 0) _gid_x382 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y382) * input_stride + _gid_x382); int _gid_x383 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y383 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x383 < 0) _gid_x383 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y383) * input_stride + _gid_x383); int _gid_x384 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y384 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x384 < 0) _gid_x384 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y384) * input_stride + _gid_x384); int _gid_x385 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y385 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x385 < 0) _gid_x385 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y385) * input_stride + _gid_x385); int _gid_x386 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y386 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x386 < 0) _gid_x386 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y386) * input_stride + _gid_x386); int _gid_x387 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y387 = 
gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x387 < 0) _gid_x387 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y387) * input_stride + _gid_x387); __syncthreads(); { float _tmp388 = 0.F; { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp388 + 0.5F); } { float _tmp389 = 0.F; { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y 
+ 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp389 + 0.5F); } { float _tmp390 = 0.F; { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp390 + 0.5F); } { float _tmp391 = 0.F; { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp391 + 0.5F); } { float _tmp392 = 0.F; { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp392 + 0.5F); } { float _tmp393 = 0.F; { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp393 + 0.5F); } { float _tmp394 = 0.F; { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp394 + 0.5F); } { float _tmp395 = 0.F; { _tmp395 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp395 + 0.5F); } } goto BH_EXIT; BH_NO: { _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); 
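// BH_NO: interior blocks, so no coordinate clamping is needed on this path.
// The remaining rows of the 12-row shared input tile are staged below (each
// thread loads three blockDim.x-wide chunks per row), followed by
// __syncthreads() and eight unguarded 5x5 convolutions per thread, one per
// blockDim.y stripe of output rows.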
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); __syncthreads(); { float _tmp396 = 0.F; { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp396 + 0.5F); } { float _tmp397 = 0.F; { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * 
(int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp397 + 0.5F); } { float _tmp398 = 0.F; { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * 
(int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp398 + 0.5F); } { float _tmp399 = 0.F; { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * 
(int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp399 + 0.5F); } { float _tmp400 = 0.F; { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * 
(int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp400 + 0.5F); } { float _tmp401 = 0.F; { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp401 + 0.5F); } { float _tmp402 = 0.F; { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * 
(int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp402 + 0.5F); } { float _tmp403 = 0.F; { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 
7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp403 + 0.5F); } } goto BH_EXIT; BH_EXIT: ; } } #endif //_CUGAUSSIANFILTERXY_CU_
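// ---------------------------------------------------------------------------
// Readability sketch (not part of the Hipacc-generated kernel above): a plain
// scalar implementation of the same 5x5 filter, using the coefficients that
// are hard-coded in the unrolled kernel (they sum to ~1.0, i.e. a normalized
// Gaussian) and the clamp-to-edge border policy of the BH_FB fallback path.
// The function and parameter names are illustrative only.
// ---------------------------------------------------------------------------
static void gaussian5x5_reference(const unsigned char *in, unsigned char *out,
                                  int width, int height, int stride) {
  static const float w[5][5] = {
      {0.00500799995f, 0.0173000004f, 0.0261509996f, 0.0173000004f, 0.00500799995f},
      {0.0173000004f, 0.0597609989f, 0.0903389975f, 0.0597609989f, 0.0173000004f},
      {0.0261509996f, 0.0903389975f, 0.136565f,     0.0903389975f, 0.0261509996f},
      {0.0173000004f, 0.0597609989f, 0.0903389975f, 0.0597609989f, 0.0173000004f},
      {0.00500799995f, 0.0173000004f, 0.0261509996f, 0.0173000004f, 0.00500799995f}};
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      float acc = 0.0f;
      for (int dy = -2; dy <= 2; ++dy) {
        for (int dx = -2; dx <= 2; ++dx) {
          int yy = y + dy, xx = x + dx;  // clamp to the image border
          if (yy < 0) yy = 0; else if (yy >= height) yy = height - 1;
          if (xx < 0) xx = 0; else if (xx >= width)  xx = width - 1;
          acc += w[dy + 2][dx + 2] * in[yy * stride + xx];
        }
      }
      out[y * stride + x] = (unsigned char)(acc + 0.5f);  // round, as the kernel does
    }
  }
}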
ccb935903ece3f4df7a2265a42824a12af2461a0.cu
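// Appears to be the Hipacc-generated CUDA counterpart of the XY Gaussian
// filter above. cuGaussianFilterXYKernel reads the uchar input through the 1D
// texture _texinputXY, stages it into a 12x97 uchar shared-memory tile (a
// two-pixel apron in y for the 5x5 stencil, with the x indices offset by 32),
// and lets each thread of a 32x1 block produce eight output rows. Border
// handling is dispatched through goto labels: BH_FB clamps every coordinate
// (full fallback), BH_TL/TR/T/BL/BR/B/R/L cover the respective image edges and
// corners, and BH_NO is the unguarded fast path for interior blocks.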
#ifndef _CUGAUSSIANFILTERXY_CU_ #define _CUGAUSSIANFILTERXY_CU_ #include "hipacc_types.hpp" #include "hipacc_math_functions.hpp" texture<uchar, cudaTextureType1D, cudaReadModeElementType> _texinputXY; const textureReference *_texinputXYRef; extern "C" { __global__ __launch_bounds__ (32*1) void cuGaussianFilterXYKernel(uchar * __restrict__ iter, int iter_width, int iter_height, int iter_stride, int input_width, int input_height, int input_stride, int bh_start_left, int bh_start_right, int bh_start_top, int bh_start_bottom, int bh_fall_back) { const int gid_x = blockDim.x * blockIdx.x + threadIdx.x; const int gid_y = blockDim.y * blockIdx.y * 8 + threadIdx.y; uchar _smeminput[12][97] __attribute__((shared)); if (bh_fall_back) goto BH_FB; if (blockIdx.x < bh_start_left && blockIdx.y < bh_start_top) goto BH_TL; if (blockIdx.x >= bh_start_right && blockIdx.y < bh_start_top) goto BH_TR; if (blockIdx.y < bh_start_top) goto BH_T; if (blockIdx.y >= bh_start_bottom && blockIdx.x < bh_start_left) goto BH_BL; if (blockIdx.y >= bh_start_bottom && blockIdx.x >= bh_start_right) goto BH_BR; if (blockIdx.y >= bh_start_bottom) goto BH_B; if (blockIdx.x >= bh_start_right) goto BH_R; if (blockIdx.x < bh_start_left) goto BH_L; goto BH_NO; BH_FB: { int _gid_x0 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y0 = gid_y + (-2); if (_gid_x0 >= input_width) _gid_x0 = input_width - 1; if (_gid_y0 >= input_height) _gid_y0 = input_height - 1; if (_gid_x0 < 0) _gid_x0 = 0; if (_gid_y0 < 0) _gid_y0 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y0) * input_stride + _gid_x0); int _gid_x1 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y1 = gid_y + (-2); if (_gid_x1 >= input_width) _gid_x1 = input_width - 1; if (_gid_y1 >= input_height) _gid_y1 = input_height - 1; if (_gid_x1 < 0) _gid_x1 = 0; if (_gid_y1 < 0) _gid_y1 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y1) * input_stride + _gid_x1); int _gid_x2 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y2 = gid_y + (-2); if (_gid_x2 >= input_width) _gid_x2 = input_width - 1; if (_gid_y2 >= input_height) _gid_y2 = input_height - 1; if (_gid_x2 < 0) _gid_x2 = 0; if (_gid_y2 < 0) _gid_y2 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y2) * input_stride + _gid_x2); int _gid_x3 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y3 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x3 >= input_width) _gid_x3 = input_width - 1; if (_gid_y3 >= input_height) _gid_y3 = input_height - 1; if (_gid_x3 < 0) _gid_x3 = 0; if (_gid_y3 < 0) _gid_y3 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y3) * input_stride + _gid_x3); int _gid_x4 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y4 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x4 >= input_width) _gid_x4 = input_width - 1; if (_gid_y4 >= input_height) _gid_y4 = input_height - 1; if (_gid_x4 < 0) _gid_x4 = 0; if (_gid_y4 < 0) _gid_y4 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y4) * input_stride + _gid_x4); int _gid_x5 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y5 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x5 >= input_width) _gid_x5 = input_width - 1; if (_gid_y5 >= input_height) _gid_y5 = input_height - 1; if (_gid_x5 < 0) _gid_x5 = 0; if (_gid_y5 < 0) _gid_y5 = 0; _smeminput[(int)threadIdx.y + 1 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y5) * input_stride + _gid_x5); int _gid_x6 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y6 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x6 >= input_width) _gid_x6 = input_width - 1; if (_gid_y6 >= input_height) _gid_y6 = input_height - 1; if (_gid_x6 < 0) _gid_x6 = 0; if (_gid_y6 < 0) _gid_y6 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y6) * input_stride + _gid_x6); int _gid_x7 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y7 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x7 >= input_width) _gid_x7 = input_width - 1; if (_gid_y7 >= input_height) _gid_y7 = input_height - 1; if (_gid_x7 < 0) _gid_x7 = 0; if (_gid_y7 < 0) _gid_y7 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y7) * input_stride + _gid_x7); int _gid_x8 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y8 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x8 >= input_width) _gid_x8 = input_width - 1; if (_gid_y8 >= input_height) _gid_y8 = input_height - 1; if (_gid_x8 < 0) _gid_x8 = 0; if (_gid_y8 < 0) _gid_y8 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y8) * input_stride + _gid_x8); int _gid_x9 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y9 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x9 >= input_width) _gid_x9 = input_width - 1; if (_gid_y9 >= input_height) _gid_y9 = input_height - 1; if (_gid_x9 < 0) _gid_x9 = 0; if (_gid_y9 < 0) _gid_y9 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y9) * input_stride + _gid_x9); int _gid_x10 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y10 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x10 >= input_width) _gid_x10 = input_width - 1; if (_gid_y10 >= input_height) _gid_y10 = input_height - 1; if (_gid_x10 < 0) _gid_x10 = 0; if (_gid_y10 < 0) _gid_y10 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y10) * input_stride + _gid_x10); int _gid_x11 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y11 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x11 >= input_width) _gid_x11 = input_width - 1; if (_gid_y11 >= input_height) _gid_y11 = input_height - 1; if (_gid_x11 < 0) _gid_x11 = 0; if (_gid_y11 < 0) _gid_y11 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y11) * input_stride + _gid_x11); int _gid_x12 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y12 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x12 >= input_width) _gid_x12 = input_width - 1; if (_gid_y12 >= input_height) _gid_y12 = input_height - 1; if (_gid_x12 < 0) _gid_x12 = 0; if (_gid_y12 < 0) _gid_y12 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y12) * input_stride + _gid_x12); int _gid_x13 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y13 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x13 >= input_width) _gid_x13 = input_width - 1; if (_gid_y13 >= input_height) _gid_y13 = input_height - 1; if (_gid_x13 < 0) _gid_x13 = 0; if (_gid_y13 < 0) _gid_y13 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y13) * input_stride + 
_gid_x13); int _gid_x14 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y14 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x14 >= input_width) _gid_x14 = input_width - 1; if (_gid_y14 >= input_height) _gid_y14 = input_height - 1; if (_gid_x14 < 0) _gid_x14 = 0; if (_gid_y14 < 0) _gid_y14 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y14) * input_stride + _gid_x14); int _gid_x15 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y15 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x15 >= input_width) _gid_x15 = input_width - 1; if (_gid_y15 >= input_height) _gid_y15 = input_height - 1; if (_gid_x15 < 0) _gid_x15 = 0; if (_gid_y15 < 0) _gid_y15 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y15) * input_stride + _gid_x15); int _gid_x16 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y16 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x16 >= input_width) _gid_x16 = input_width - 1; if (_gid_y16 >= input_height) _gid_y16 = input_height - 1; if (_gid_x16 < 0) _gid_x16 = 0; if (_gid_y16 < 0) _gid_y16 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y16) * input_stride + _gid_x16); int _gid_x17 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y17 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x17 >= input_width) _gid_x17 = input_width - 1; if (_gid_y17 >= input_height) _gid_y17 = input_height - 1; if (_gid_x17 < 0) _gid_x17 = 0; if (_gid_y17 < 0) _gid_y17 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y17) * input_stride + _gid_x17); int _gid_x18 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y18 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x18 >= input_width) _gid_x18 = input_width - 1; if (_gid_y18 >= input_height) _gid_y18 = input_height - 1; if (_gid_x18 < 0) _gid_x18 = 0; if (_gid_y18 < 0) _gid_y18 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y18) * input_stride + _gid_x18); int _gid_x19 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y19 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x19 >= input_width) _gid_x19 = input_width - 1; if (_gid_y19 >= input_height) _gid_y19 = input_height - 1; if (_gid_x19 < 0) _gid_x19 = 0; if (_gid_y19 < 0) _gid_y19 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y19) * input_stride + _gid_x19); int _gid_x20 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y20 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x20 >= input_width) _gid_x20 = input_width - 1; if (_gid_y20 >= input_height) _gid_y20 = input_height - 1; if (_gid_x20 < 0) _gid_x20 = 0; if (_gid_y20 < 0) _gid_y20 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y20) * input_stride + _gid_x20); int _gid_x21 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y21 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x21 >= input_width) _gid_x21 = input_width - 1; if (_gid_y21 >= input_height) _gid_y21 = input_height - 1; if (_gid_x21 < 0) _gid_x21 = 0; if (_gid_y21 < 0) _gid_y21 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y21) * input_stride + _gid_x21); int _gid_x22 = gid_x + 1 * (int)blockDim.x - 32; int 
_gid_y22 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x22 >= input_width) _gid_x22 = input_width - 1; if (_gid_y22 >= input_height) _gid_y22 = input_height - 1; if (_gid_x22 < 0) _gid_x22 = 0; if (_gid_y22 < 0) _gid_y22 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y22) * input_stride + _gid_x22); int _gid_x23 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y23 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x23 >= input_width) _gid_x23 = input_width - 1; if (_gid_y23 >= input_height) _gid_y23 = input_height - 1; if (_gid_x23 < 0) _gid_x23 = 0; if (_gid_y23 < 0) _gid_y23 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y23) * input_stride + _gid_x23); int _gid_x24 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y24 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x24 >= input_width) _gid_x24 = input_width - 1; if (_gid_y24 >= input_height) _gid_y24 = input_height - 1; if (_gid_x24 < 0) _gid_x24 = 0; if (_gid_y24 < 0) _gid_y24 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y24) * input_stride + _gid_x24); int _gid_x25 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y25 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x25 >= input_width) _gid_x25 = input_width - 1; if (_gid_y25 >= input_height) _gid_y25 = input_height - 1; if (_gid_x25 < 0) _gid_x25 = 0; if (_gid_y25 < 0) _gid_y25 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y25) * input_stride + _gid_x25); int _gid_x26 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y26 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x26 >= input_width) _gid_x26 = input_width - 1; if (_gid_y26 >= input_height) _gid_y26 = input_height - 1; if (_gid_x26 < 0) _gid_x26 = 0; if (_gid_y26 < 0) _gid_y26 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y26) * input_stride + _gid_x26); int _gid_x27 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y27 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x27 >= input_width) _gid_x27 = input_width - 1; if (_gid_y27 >= input_height) _gid_y27 = input_height - 1; if (_gid_x27 < 0) _gid_x27 = 0; if (_gid_y27 < 0) _gid_y27 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y27) * input_stride + _gid_x27); int _gid_x28 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y28 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x28 >= input_width) _gid_x28 = input_width - 1; if (_gid_y28 >= input_height) _gid_y28 = input_height - 1; if (_gid_x28 < 0) _gid_x28 = 0; if (_gid_y28 < 0) _gid_y28 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y28) * input_stride + _gid_x28); int _gid_x29 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y29 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x29 >= input_width) _gid_x29 = input_width - 1; if (_gid_y29 >= input_height) _gid_y29 = input_height - 1; if (_gid_x29 < 0) _gid_x29 = 0; if (_gid_y29 < 0) _gid_y29 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y29) * input_stride + _gid_x29); int _gid_x30 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y30 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x30 >= 
input_width) _gid_x30 = input_width - 1; if (_gid_y30 >= input_height) _gid_y30 = input_height - 1; if (_gid_x30 < 0) _gid_x30 = 0; if (_gid_y30 < 0) _gid_y30 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y30) * input_stride + _gid_x30); int _gid_x31 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y31 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x31 >= input_width) _gid_x31 = input_width - 1; if (_gid_y31 >= input_height) _gid_y31 = input_height - 1; if (_gid_x31 < 0) _gid_x31 = 0; if (_gid_y31 < 0) _gid_y31 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y31) * input_stride + _gid_x31); int _gid_x32 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y32 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x32 >= input_width) _gid_x32 = input_width - 1; if (_gid_y32 >= input_height) _gid_y32 = input_height - 1; if (_gid_x32 < 0) _gid_x32 = 0; if (_gid_y32 < 0) _gid_y32 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y32) * input_stride + _gid_x32); int _gid_x33 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y33 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x33 >= input_width) _gid_x33 = input_width - 1; if (_gid_y33 >= input_height) _gid_y33 = input_height - 1; if (_gid_x33 < 0) _gid_x33 = 0; if (_gid_y33 < 0) _gid_y33 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y33) * input_stride + _gid_x33); int _gid_x34 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y34 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x34 >= input_width) _gid_x34 = input_width - 1; if (_gid_y34 >= input_height) _gid_y34 = input_height - 1; if (_gid_x34 < 0) _gid_x34 = 0; if (_gid_y34 < 0) _gid_y34 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y34) * input_stride + _gid_x34); int _gid_x35 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y35 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x35 >= input_width) _gid_x35 = input_width - 1; if (_gid_y35 >= input_height) _gid_y35 = input_height - 1; if (_gid_x35 < 0) _gid_x35 = 0; if (_gid_y35 < 0) _gid_y35 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y35) * input_stride + _gid_x35); __syncthreads(); if (gid_x < iter_width) { if (gid_y < iter_height) { float _tmp36 = 0.F; { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x 
+ 2 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp36 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp36 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp36 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp36 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp37 = 0.F; { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * 
(int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp37 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp37 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp37 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp37 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp38 = 0.F; { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp38 += 
0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp38 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp38 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp38 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp38 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp39 = 0.F; { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp39 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp39 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp39 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp39 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp40 = 0.F; { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp40 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp40 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp40 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp40 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp41 = 0.F; { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 
2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp41 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp41 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp41 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp41 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp42 = 0.F; { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp42 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp42 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp42 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp42 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp43 = 0.F; { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 
2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp43 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp43 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp43 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp43 + 0.5F); } } } goto BH_EXIT; BH_TL: { int _gid_x44 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y44 = gid_y + (-2); if (_gid_x44 < 0) _gid_x44 = 0; if (_gid_y44 < 0) _gid_y44 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y44) * input_stride + _gid_x44); int _gid_x45 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y45 = gid_y + (-2); if (_gid_x45 < 0) _gid_x45 = 0; if (_gid_y45 < 0) _gid_y45 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y45) * input_stride + _gid_x45); int _gid_x46 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y46 = gid_y + (-2); if (_gid_x46 < 0) _gid_x46 = 0; if (_gid_y46 < 0) _gid_y46 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y46) * input_stride + _gid_x46); int _gid_x47 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y47 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x47 < 0) _gid_x47 = 0; if (_gid_y47 < 0) _gid_y47 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y47) * input_stride + _gid_x47); int _gid_x48 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y48 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x48 < 0) _gid_x48 = 0; if (_gid_y48 < 0) _gid_y48 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = 
tex1Dfetch(_texinputXY, (_gid_y48) * input_stride + _gid_x48); int _gid_x49 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y49 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x49 < 0) _gid_x49 = 0; if (_gid_y49 < 0) _gid_y49 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y49) * input_stride + _gid_x49); int _gid_x50 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y50 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x50 < 0) _gid_x50 = 0; if (_gid_y50 < 0) _gid_y50 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y50) * input_stride + _gid_x50); int _gid_x51 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y51 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x51 < 0) _gid_x51 = 0; if (_gid_y51 < 0) _gid_y51 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y51) * input_stride + _gid_x51); int _gid_x52 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y52 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x52 < 0) _gid_x52 = 0; if (_gid_y52 < 0) _gid_y52 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y52) * input_stride + _gid_x52); int _gid_x53 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y53 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x53 < 0) _gid_x53 = 0; if (_gid_y53 < 0) _gid_y53 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y53) * input_stride + _gid_x53); int _gid_x54 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y54 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x54 < 0) _gid_x54 = 0; if (_gid_y54 < 0) _gid_y54 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y54) * input_stride + _gid_x54); int _gid_x55 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y55 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x55 < 0) _gid_x55 = 0; if (_gid_y55 < 0) _gid_y55 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y55) * input_stride + _gid_x55); int _gid_x56 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y56 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x56 < 0) _gid_x56 = 0; if (_gid_y56 < 0) _gid_y56 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y56) * input_stride + _gid_x56); int _gid_x57 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y57 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x57 < 0) _gid_x57 = 0; if (_gid_y57 < 0) _gid_y57 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y57) * input_stride + _gid_x57); int _gid_x58 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y58 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x58 < 0) _gid_x58 = 0; if (_gid_y58 < 0) _gid_y58 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y58) * input_stride + _gid_x58); int _gid_x59 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y59 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x59 < 0) _gid_x59 = 0; if (_gid_y59 < 0) _gid_y59 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y59) 
* input_stride + _gid_x59); int _gid_x60 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y60 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x60 < 0) _gid_x60 = 0; if (_gid_y60 < 0) _gid_y60 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y60) * input_stride + _gid_x60); int _gid_x61 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y61 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x61 < 0) _gid_x61 = 0; if (_gid_y61 < 0) _gid_y61 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y61) * input_stride + _gid_x61); int _gid_x62 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y62 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x62 < 0) _gid_x62 = 0; if (_gid_y62 < 0) _gid_y62 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y62) * input_stride + _gid_x62); int _gid_x63 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y63 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x63 < 0) _gid_x63 = 0; if (_gid_y63 < 0) _gid_y63 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y63) * input_stride + _gid_x63); int _gid_x64 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y64 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x64 < 0) _gid_x64 = 0; if (_gid_y64 < 0) _gid_y64 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y64) * input_stride + _gid_x64); int _gid_x65 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y65 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x65 < 0) _gid_x65 = 0; if (_gid_y65 < 0) _gid_y65 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y65) * input_stride + _gid_x65); int _gid_x66 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y66 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x66 < 0) _gid_x66 = 0; if (_gid_y66 < 0) _gid_y66 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y66) * input_stride + _gid_x66); int _gid_x67 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y67 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x67 < 0) _gid_x67 = 0; if (_gid_y67 < 0) _gid_y67 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y67) * input_stride + _gid_x67); int _gid_x68 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y68 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x68 < 0) _gid_x68 = 0; if (_gid_y68 < 0) _gid_y68 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y68) * input_stride + _gid_x68); int _gid_x69 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y69 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x69 < 0) _gid_x69 = 0; if (_gid_y69 < 0) _gid_y69 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y69) * input_stride + _gid_x69); int _gid_x70 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y70 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x70 < 0) _gid_x70 = 0; if (_gid_y70 < 0) _gid_y70 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y70) * input_stride + _gid_x70); int 
_gid_x71 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y71 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x71 < 0) _gid_x71 = 0; if (_gid_y71 < 0) _gid_y71 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y71) * input_stride + _gid_x71); int _gid_x72 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y72 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x72 < 0) _gid_x72 = 0; if (_gid_y72 < 0) _gid_y72 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y72) * input_stride + _gid_x72); int _gid_x73 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y73 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x73 < 0) _gid_x73 = 0; if (_gid_y73 < 0) _gid_y73 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y73) * input_stride + _gid_x73); int _gid_x74 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y74 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x74 < 0) _gid_x74 = 0; if (_gid_y74 < 0) _gid_y74 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y74) * input_stride + _gid_x74); int _gid_x75 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y75 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x75 < 0) _gid_x75 = 0; if (_gid_y75 < 0) _gid_y75 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y75) * input_stride + _gid_x75); int _gid_x76 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y76 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x76 < 0) _gid_x76 = 0; if (_gid_y76 < 0) _gid_y76 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y76) * input_stride + _gid_x76); int _gid_x77 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y77 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x77 < 0) _gid_x77 = 0; if (_gid_y77 < 0) _gid_y77 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y77) * input_stride + _gid_x77); int _gid_x78 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y78 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x78 < 0) _gid_x78 = 0; if (_gid_y78 < 0) _gid_y78 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y78) * input_stride + _gid_x78); int _gid_x79 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y79 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x79 < 0) _gid_x79 = 0; if (_gid_y79 < 0) _gid_y79 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y79) * input_stride + _gid_x79); __syncthreads(); { float _tmp80 = 0.F; { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp80 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp80 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp80 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp80 + 0.5F); } { float _tmp81 = 0.F; { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp81 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp81 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp81 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp81 + 0.5F); } { float _tmp82 = 0.F; { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 
* (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp82 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp82 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp82 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp82 + 0.5F); } { float _tmp83 = 0.F; { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp83 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 0 + 32]; } { _tmp83 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp83 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp83 + 0.5F); } { float _tmp84 = 0.F; { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp84 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp84 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp84 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp84 + 0.5F); } { float _tmp85 = 0.F; { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { 
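/*
 * The unrolled accumulation blocks in this region apply what appears to be a
 * normalized 5x5 blur mask to the shared-memory tile _smeminput (the generated
 * coefficients sum to ~1.0):
 *
 *   0.005008  0.017300  0.026151  0.017300  0.005008
 *   0.017300  0.059761  0.090339  0.059761  0.017300
 *   0.026151  0.090339  0.136565  0.090339  0.026151
 *   0.017300  0.059761  0.090339  0.059761  0.017300
 *   0.005008  0.017300  0.026151  0.017300  0.005008
 *
 * Each _tmpNN sum covers one blockDim.y-spaced output row of the thread's tile;
 * the result is rounded (+0.5F) and stored to iter[] as uchar. The BH_*-labelled
 * blocks repeat the same stencil with different image-border clamping of the
 * tex1Dfetch coordinates (BH_TL clamps x and y up to 0, BH_TR clamps x down to
 * input_width-1 and y up to 0, and so on for the remaining border cases).
 */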
_tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp85 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp85 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp85 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp85 + 0.5F); } { float _tmp86 = 0.F; { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp86 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp86 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp86 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp86 + 0.5F); } { float _tmp87 = 0.F; { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y 
+ 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp87 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp87 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp87 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp87 + 0.5F); } } goto BH_EXIT; BH_TR: { int _gid_x88 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y88 = gid_y + (-2); if (_gid_x88 >= input_width) _gid_x88 = input_width - 1; if (_gid_y88 < 0) _gid_y88 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y88) * input_stride + _gid_x88); int _gid_x89 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y89 = gid_y + (-2); if (_gid_x89 >= input_width) _gid_x89 = input_width - 1; if (_gid_y89 < 0) _gid_y89 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y89) * input_stride + _gid_x89); int _gid_x90 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y90 = gid_y + (-2); if (_gid_x90 >= input_width) _gid_x90 = input_width - 1; if (_gid_y90 < 0) _gid_y90 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y90) * input_stride + _gid_x90); int _gid_x91 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y91 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x91 >= input_width) _gid_x91 = input_width - 1; if (_gid_y91 < 0) _gid_y91 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y91) * input_stride + _gid_x91); int _gid_x92 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y92 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x92 >= input_width) _gid_x92 = input_width - 1; if (_gid_y92 < 0) _gid_y92 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y92) * input_stride + _gid_x92); int _gid_x93 = gid_x + 2 * 
(int)blockDim.x - 32; int _gid_y93 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x93 >= input_width) _gid_x93 = input_width - 1; if (_gid_y93 < 0) _gid_y93 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y93) * input_stride + _gid_x93); int _gid_x94 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y94 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x94 >= input_width) _gid_x94 = input_width - 1; if (_gid_y94 < 0) _gid_y94 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y94) * input_stride + _gid_x94); int _gid_x95 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y95 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x95 >= input_width) _gid_x95 = input_width - 1; if (_gid_y95 < 0) _gid_y95 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y95) * input_stride + _gid_x95); int _gid_x96 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y96 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x96 >= input_width) _gid_x96 = input_width - 1; if (_gid_y96 < 0) _gid_y96 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y96) * input_stride + _gid_x96); int _gid_x97 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y97 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x97 >= input_width) _gid_x97 = input_width - 1; if (_gid_y97 < 0) _gid_y97 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y97) * input_stride + _gid_x97); int _gid_x98 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y98 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x98 >= input_width) _gid_x98 = input_width - 1; if (_gid_y98 < 0) _gid_y98 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y98) * input_stride + _gid_x98); int _gid_x99 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y99 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x99 >= input_width) _gid_x99 = input_width - 1; if (_gid_y99 < 0) _gid_y99 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y99) * input_stride + _gid_x99); int _gid_x100 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y100 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x100 >= input_width) _gid_x100 = input_width - 1; if (_gid_y100 < 0) _gid_y100 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y100) * input_stride + _gid_x100); int _gid_x101 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y101 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x101 >= input_width) _gid_x101 = input_width - 1; if (_gid_y101 < 0) _gid_y101 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y101) * input_stride + _gid_x101); int _gid_x102 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y102 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x102 >= input_width) _gid_x102 = input_width - 1; if (_gid_y102 < 0) _gid_y102 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y102) * input_stride + _gid_x102); int _gid_x103 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y103 = gid_y + 5 * (int)blockDim.y + (-2); if 
(_gid_x103 >= input_width) _gid_x103 = input_width - 1; if (_gid_y103 < 0) _gid_y103 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y103) * input_stride + _gid_x103); int _gid_x104 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y104 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x104 >= input_width) _gid_x104 = input_width - 1; if (_gid_y104 < 0) _gid_y104 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y104) * input_stride + _gid_x104); int _gid_x105 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y105 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x105 >= input_width) _gid_x105 = input_width - 1; if (_gid_y105 < 0) _gid_y105 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y105) * input_stride + _gid_x105); int _gid_x106 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y106 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x106 >= input_width) _gid_x106 = input_width - 1; if (_gid_y106 < 0) _gid_y106 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y106) * input_stride + _gid_x106); int _gid_x107 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y107 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x107 >= input_width) _gid_x107 = input_width - 1; if (_gid_y107 < 0) _gid_y107 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y107) * input_stride + _gid_x107); int _gid_x108 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y108 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x108 >= input_width) _gid_x108 = input_width - 1; if (_gid_y108 < 0) _gid_y108 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y108) * input_stride + _gid_x108); int _gid_x109 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y109 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x109 >= input_width) _gid_x109 = input_width - 1; if (_gid_y109 < 0) _gid_y109 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y109) * input_stride + _gid_x109); int _gid_x110 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y110 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x110 >= input_width) _gid_x110 = input_width - 1; if (_gid_y110 < 0) _gid_y110 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y110) * input_stride + _gid_x110); int _gid_x111 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y111 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x111 >= input_width) _gid_x111 = input_width - 1; if (_gid_y111 < 0) _gid_y111 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y111) * input_stride + _gid_x111); int _gid_x112 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y112 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x112 >= input_width) _gid_x112 = input_width - 1; if (_gid_y112 < 0) _gid_y112 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y112) * input_stride + _gid_x112); int _gid_x113 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y113 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x113 >= 
input_width) _gid_x113 = input_width - 1; if (_gid_y113 < 0) _gid_y113 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y113) * input_stride + _gid_x113); int _gid_x114 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y114 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x114 >= input_width) _gid_x114 = input_width - 1; if (_gid_y114 < 0) _gid_y114 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y114) * input_stride + _gid_x114); int _gid_x115 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y115 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x115 >= input_width) _gid_x115 = input_width - 1; if (_gid_y115 < 0) _gid_y115 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y115) * input_stride + _gid_x115); int _gid_x116 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y116 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x116 >= input_width) _gid_x116 = input_width - 1; if (_gid_y116 < 0) _gid_y116 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y116) * input_stride + _gid_x116); int _gid_x117 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y117 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x117 >= input_width) _gid_x117 = input_width - 1; if (_gid_y117 < 0) _gid_y117 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y117) * input_stride + _gid_x117); int _gid_x118 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y118 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x118 >= input_width) _gid_x118 = input_width - 1; if (_gid_y118 < 0) _gid_y118 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y118) * input_stride + _gid_x118); int _gid_x119 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y119 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x119 >= input_width) _gid_x119 = input_width - 1; if (_gid_y119 < 0) _gid_y119 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y119) * input_stride + _gid_x119); int _gid_x120 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y120 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x120 >= input_width) _gid_x120 = input_width - 1; if (_gid_y120 < 0) _gid_y120 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y120) * input_stride + _gid_x120); int _gid_x121 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y121 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x121 >= input_width) _gid_x121 = input_width - 1; if (_gid_y121 < 0) _gid_y121 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y121) * input_stride + _gid_x121); int _gid_x122 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y122 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x122 >= input_width) _gid_x122 = input_width - 1; if (_gid_y122 < 0) _gid_y122 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y122) * input_stride + _gid_x122); int _gid_x123 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y123 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x123 >= input_width) 
_gid_x123 = input_width - 1; if (_gid_y123 < 0) _gid_y123 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y123) * input_stride + _gid_x123); __syncthreads(); if (gid_x < iter_width) { { float _tmp124 = 0.F; { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp124 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp124 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp124 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp124 + 0.5F); } } if (gid_x < iter_width) { { float _tmp125 = 0.F; { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 
+= 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp125 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp125 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp125 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp125 + 0.5F); } } if (gid_x < iter_width) { { float _tmp126 = 0.F; { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 
2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp126 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp126 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp126 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp126 + 0.5F); } } if (gid_x < iter_width) { { float _tmp127 = 0.F; { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 
* (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp127 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp127 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp127 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp127 + 0.5F); } } if (gid_x < iter_width) { { float _tmp128 = 0.F; { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp128 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp128 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp128 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp128 + 0.5F); } } if (gid_x < iter_width) { { float _tmp129 = 0.F; { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { 
_tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp129 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp129 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp129 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp129 + 0.5F); } } if (gid_x < iter_width) { { float _tmp130 = 0.F; { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp130 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp130 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp130 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp130 + 0.5F); } } if (gid_x < iter_width) { { float _tmp131 = 0.F; { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp131 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp131 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp131 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp131 + 0.5F); } } } goto BH_EXIT; BH_T: { int _gid_x132 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y132 = gid_y + (-2); if (_gid_y132 < 0) _gid_y132 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y132) * input_stride + _gid_x132); int _gid_x133 = gid_x + 1 * (int)blockDim.x - 32; 
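/* Descriptive note (added, hedged): this is the BH_T specialization, which appears to
   handle blocks touching the top image border. Only the y coordinate is clamped
   (negative rows are pinned to row 0); x is presumably known to be in range for this
   case, so no x clamping is emitted. Each thread stages a tile of input samples into
   _smeminput through the _texinputXY texture, covering 3 column strips of blockDim.x
   and 12 row strips of blockDim.y, shifted by the halo offsets (-32 in x, -2 in y),
   before the unrolled accumulation below applies what looks like a 5x5 Gaussian-style
   filter and writes rounded uchar results into iter. */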
int _gid_y133 = gid_y + (-2); if (_gid_y133 < 0) _gid_y133 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y133) * input_stride + _gid_x133); int _gid_x134 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y134 = gid_y + (-2); if (_gid_y134 < 0) _gid_y134 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y134) * input_stride + _gid_x134); int _gid_x135 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y135 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y135 < 0) _gid_y135 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y135) * input_stride + _gid_x135); int _gid_x136 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y136 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y136 < 0) _gid_y136 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y136) * input_stride + _gid_x136); int _gid_x137 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y137 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y137 < 0) _gid_y137 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y137) * input_stride + _gid_x137); int _gid_x138 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y138 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y138 < 0) _gid_y138 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y138) * input_stride + _gid_x138); int _gid_x139 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y139 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y139 < 0) _gid_y139 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y139) * input_stride + _gid_x139); int _gid_x140 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y140 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y140 < 0) _gid_y140 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y140) * input_stride + _gid_x140); int _gid_x141 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y141 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y141 < 0) _gid_y141 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y141) * input_stride + _gid_x141); int _gid_x142 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y142 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y142 < 0) _gid_y142 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y142) * input_stride + _gid_x142); int _gid_x143 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y143 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y143 < 0) _gid_y143 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y143) * input_stride + _gid_x143); int _gid_x144 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y144 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y144 < 0) _gid_y144 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y144) * input_stride + _gid_x144); int _gid_x145 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y145 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y145 < 0) _gid_y145 = 0; _smeminput[(int)threadIdx.y 
+ 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y145) * input_stride + _gid_x145); int _gid_x146 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y146 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y146 < 0) _gid_y146 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y146) * input_stride + _gid_x146); int _gid_x147 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y147 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y147 < 0) _gid_y147 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y147) * input_stride + _gid_x147); int _gid_x148 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y148 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y148 < 0) _gid_y148 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y148) * input_stride + _gid_x148); int _gid_x149 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y149 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y149 < 0) _gid_y149 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y149) * input_stride + _gid_x149); int _gid_x150 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y150 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y150 < 0) _gid_y150 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y150) * input_stride + _gid_x150); int _gid_x151 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y151 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y151 < 0) _gid_y151 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y151) * input_stride + _gid_x151); int _gid_x152 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y152 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y152 < 0) _gid_y152 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y152) * input_stride + _gid_x152); int _gid_x153 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y153 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y153 < 0) _gid_y153 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y153) * input_stride + _gid_x153); int _gid_x154 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y154 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y154 < 0) _gid_y154 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y154) * input_stride + _gid_x154); int _gid_x155 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y155 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y155 < 0) _gid_y155 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y155) * input_stride + _gid_x155); int _gid_x156 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y156 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y156 < 0) _gid_y156 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y156) * input_stride + _gid_x156); int _gid_x157 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y157 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y157 < 0) _gid_y157 = 0; _smeminput[(int)threadIdx.y + 8 * 
(int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y157) * input_stride + _gid_x157); int _gid_x158 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y158 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y158 < 0) _gid_y158 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y158) * input_stride + _gid_x158); int _gid_x159 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y159 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y159 < 0) _gid_y159 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y159) * input_stride + _gid_x159); int _gid_x160 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y160 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y160 < 0) _gid_y160 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y160) * input_stride + _gid_x160); int _gid_x161 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y161 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y161 < 0) _gid_y161 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y161) * input_stride + _gid_x161); int _gid_x162 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y162 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y162 < 0) _gid_y162 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y162) * input_stride + _gid_x162); int _gid_x163 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y163 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y163 < 0) _gid_y163 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y163) * input_stride + _gid_x163); int _gid_x164 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y164 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y164 < 0) _gid_y164 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y164) * input_stride + _gid_x164); int _gid_x165 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y165 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y165 < 0) _gid_y165 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y165) * input_stride + _gid_x165); int _gid_x166 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y166 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y166 < 0) _gid_y166 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y166) * input_stride + _gid_x166); int _gid_x167 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y167 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y167 < 0) _gid_y167 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y167) * input_stride + _gid_x167); __syncthreads(); { float _tmp168 = 0.F; { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp168 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp168 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp168 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp168 + 0.5F); } { float _tmp169 = 0.F; { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.136565F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp169 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp169 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp169 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp169 + 0.5F); } { float _tmp170 = 0.F; { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp170 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp170 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp170 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp170 + 0.5F); } { float _tmp171 = 0.F; { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp171 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp171 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp171 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp171 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp171 + 0.5F); } { float _tmp172 = 0.F; { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp172 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp172 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp172 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp172 += 0.00500799995F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp172 + 0.5F); } { float _tmp173 = 0.F; { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp173 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp173 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp173 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp173 + 0.5F); } { float _tmp174 = 0.F; { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp174 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp174 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp174 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp174 + 0.5F); } { float _tmp175 = 0.F; { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 
0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp175 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp175 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp175 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp175 + 0.5F); } } goto BH_EXIT; BH_BL: { int _gid_x176 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y176 = gid_y + (-2); if (_gid_y176 >= input_height) _gid_y176 = input_height - 1; if (_gid_x176 < 0) _gid_x176 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y176) * input_stride + _gid_x176); int _gid_x177 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y177 = gid_y + (-2); if (_gid_y177 >= input_height) _gid_y177 = input_height - 1; if (_gid_x177 < 0) _gid_x177 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y177) * input_stride + _gid_x177); int _gid_x178 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y178 = gid_y + (-2); if (_gid_y178 >= input_height) _gid_y178 = input_height - 1; if (_gid_x178 < 0) _gid_x178 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y178) * input_stride + _gid_x178); int _gid_x179 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y179 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y179 >= input_height) _gid_y179 = input_height - 1; if (_gid_x179 < 0) _gid_x179 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * 
(int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y179) * input_stride + _gid_x179); int _gid_x180 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y180 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y180 >= input_height) _gid_y180 = input_height - 1; if (_gid_x180 < 0) _gid_x180 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y180) * input_stride + _gid_x180); int _gid_x181 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y181 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y181 >= input_height) _gid_y181 = input_height - 1; if (_gid_x181 < 0) _gid_x181 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y181) * input_stride + _gid_x181); int _gid_x182 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y182 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y182 >= input_height) _gid_y182 = input_height - 1; if (_gid_x182 < 0) _gid_x182 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y182) * input_stride + _gid_x182); int _gid_x183 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y183 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y183 >= input_height) _gid_y183 = input_height - 1; if (_gid_x183 < 0) _gid_x183 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y183) * input_stride + _gid_x183); int _gid_x184 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y184 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y184 >= input_height) _gid_y184 = input_height - 1; if (_gid_x184 < 0) _gid_x184 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y184) * input_stride + _gid_x184); int _gid_x185 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y185 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y185 >= input_height) _gid_y185 = input_height - 1; if (_gid_x185 < 0) _gid_x185 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y185) * input_stride + _gid_x185); int _gid_x186 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y186 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y186 >= input_height) _gid_y186 = input_height - 1; if (_gid_x186 < 0) _gid_x186 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y186) * input_stride + _gid_x186); int _gid_x187 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y187 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y187 >= input_height) _gid_y187 = input_height - 1; if (_gid_x187 < 0) _gid_x187 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y187) * input_stride + _gid_x187); int _gid_x188 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y188 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y188 >= input_height) _gid_y188 = input_height - 1; if (_gid_x188 < 0) _gid_x188 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y188) * input_stride + _gid_x188); int _gid_x189 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y189 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y189 >= input_height) _gid_y189 = input_height - 1; if (_gid_x189 < 0) _gid_x189 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * 
(int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y189) * input_stride + _gid_x189); int _gid_x190 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y190 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y190 >= input_height) _gid_y190 = input_height - 1; if (_gid_x190 < 0) _gid_x190 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y190) * input_stride + _gid_x190); int _gid_x191 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y191 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y191 >= input_height) _gid_y191 = input_height - 1; if (_gid_x191 < 0) _gid_x191 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y191) * input_stride + _gid_x191); int _gid_x192 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y192 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y192 >= input_height) _gid_y192 = input_height - 1; if (_gid_x192 < 0) _gid_x192 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y192) * input_stride + _gid_x192); int _gid_x193 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y193 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y193 >= input_height) _gid_y193 = input_height - 1; if (_gid_x193 < 0) _gid_x193 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y193) * input_stride + _gid_x193); int _gid_x194 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y194 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y194 >= input_height) _gid_y194 = input_height - 1; if (_gid_x194 < 0) _gid_x194 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y194) * input_stride + _gid_x194); int _gid_x195 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y195 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y195 >= input_height) _gid_y195 = input_height - 1; if (_gid_x195 < 0) _gid_x195 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y195) * input_stride + _gid_x195); int _gid_x196 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y196 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y196 >= input_height) _gid_y196 = input_height - 1; if (_gid_x196 < 0) _gid_x196 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y196) * input_stride + _gid_x196); int _gid_x197 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y197 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y197 >= input_height) _gid_y197 = input_height - 1; if (_gid_x197 < 0) _gid_x197 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y197) * input_stride + _gid_x197); int _gid_x198 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y198 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y198 >= input_height) _gid_y198 = input_height - 1; if (_gid_x198 < 0) _gid_x198 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y198) * input_stride + _gid_x198); int _gid_x199 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y199 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y199 >= input_height) _gid_y199 = input_height - 1; if (_gid_x199 < 0) _gid_x199 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * 
(int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y199) * input_stride + _gid_x199); int _gid_x200 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y200 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y200 >= input_height) _gid_y200 = input_height - 1; if (_gid_x200 < 0) _gid_x200 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y200) * input_stride + _gid_x200); int _gid_x201 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y201 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y201 >= input_height) _gid_y201 = input_height - 1; if (_gid_x201 < 0) _gid_x201 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y201) * input_stride + _gid_x201); int _gid_x202 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y202 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y202 >= input_height) _gid_y202 = input_height - 1; if (_gid_x202 < 0) _gid_x202 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y202) * input_stride + _gid_x202); int _gid_x203 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y203 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y203 >= input_height) _gid_y203 = input_height - 1; if (_gid_x203 < 0) _gid_x203 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y203) * input_stride + _gid_x203); int _gid_x204 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y204 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y204 >= input_height) _gid_y204 = input_height - 1; if (_gid_x204 < 0) _gid_x204 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y204) * input_stride + _gid_x204); int _gid_x205 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y205 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y205 >= input_height) _gid_y205 = input_height - 1; if (_gid_x205 < 0) _gid_x205 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y205) * input_stride + _gid_x205); int _gid_x206 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y206 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y206 >= input_height) _gid_y206 = input_height - 1; if (_gid_x206 < 0) _gid_x206 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y206) * input_stride + _gid_x206); int _gid_x207 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y207 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y207 >= input_height) _gid_y207 = input_height - 1; if (_gid_x207 < 0) _gid_x207 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y207) * input_stride + _gid_x207); int _gid_x208 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y208 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y208 >= input_height) _gid_y208 = input_height - 1; if (_gid_x208 < 0) _gid_x208 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y208) * input_stride + _gid_x208); int _gid_x209 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y209 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y209 >= input_height) _gid_y209 = input_height - 1; if (_gid_x209 < 0) _gid_x209 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 
0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y209) * input_stride + _gid_x209); int _gid_x210 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y210 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y210 >= input_height) _gid_y210 = input_height - 1; if (_gid_x210 < 0) _gid_x210 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y210) * input_stride + _gid_x210); int _gid_x211 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y211 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y211 >= input_height) _gid_y211 = input_height - 1; if (_gid_x211 < 0) _gid_x211 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y211) * input_stride + _gid_x211); __syncthreads(); if (gid_y < iter_height) { float _tmp212 = 0.F; { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp212 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp212 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp212 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp212 + 0.5F); } if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp213 = 0.F; { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 
+= 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp213 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp213 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp213 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp213 + 0.5F); } if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp214 = 0.F; { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y 
+ -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp214 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp214 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp214 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp214 + 0.5F); } if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp215 = 0.F; { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp215 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp215 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp215 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp215 + 0.5F); } if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp216 = 0.F; { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 
2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp216 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp216 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp216 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp216 + 0.5F); } if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp217 = 0.F; { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp217 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp217 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp217 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp217 + 0.5F); } if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp218 = 0.F; { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp218 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp218 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp218 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp218 + 0.5F); } if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp219 = 0.F; { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp219 += 0.0261509996F * 
_smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp219 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp219 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp219 + 0.5F); } } goto BH_EXIT; BH_BR: { int _gid_x220 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y220 = gid_y + (-2); if (_gid_x220 >= input_width) _gid_x220 = input_width - 1; if (_gid_y220 >= input_height) _gid_y220 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y220) * input_stride + _gid_x220); int _gid_x221 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y221 = gid_y + (-2); if (_gid_x221 >= input_width) _gid_x221 = input_width - 1; if (_gid_y221 >= input_height) _gid_y221 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y221) * input_stride + _gid_x221); int _gid_x222 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y222 = gid_y + (-2); if (_gid_x222 >= input_width) _gid_x222 = input_width - 1; if (_gid_y222 >= input_height) _gid_y222 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y222) * input_stride + _gid_x222); int _gid_x223 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y223 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x223 >= input_width) _gid_x223 = input_width - 1; if (_gid_y223 >= input_height) _gid_y223 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y223) * input_stride + _gid_x223); int _gid_x224 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y224 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x224 >= input_width) _gid_x224 = input_width - 1; if (_gid_y224 >= input_height) _gid_y224 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y224) * input_stride + _gid_x224); int _gid_x225 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y225 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x225 >= input_width) _gid_x225 = input_width - 1; if (_gid_y225 >= input_height) _gid_y225 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y225) * input_stride + _gid_x225); int _gid_x226 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y226 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x226 >= input_width) _gid_x226 = input_width - 1; if (_gid_y226 >= input_height) _gid_y226 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y226) * input_stride + _gid_x226); int _gid_x227 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y227 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x227 >= input_width) _gid_x227 = input_width - 1; if (_gid_y227 >= input_height) _gid_y227 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y227) * input_stride + _gid_x227); int _gid_x228 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y228 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x228 >= input_width) _gid_x228 = input_width - 1; if (_gid_y228 >= input_height) _gid_y228 = 
input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y228) * input_stride + _gid_x228); int _gid_x229 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y229 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x229 >= input_width) _gid_x229 = input_width - 1; if (_gid_y229 >= input_height) _gid_y229 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y229) * input_stride + _gid_x229); int _gid_x230 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y230 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x230 >= input_width) _gid_x230 = input_width - 1; if (_gid_y230 >= input_height) _gid_y230 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y230) * input_stride + _gid_x230); int _gid_x231 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y231 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x231 >= input_width) _gid_x231 = input_width - 1; if (_gid_y231 >= input_height) _gid_y231 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y231) * input_stride + _gid_x231); int _gid_x232 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y232 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x232 >= input_width) _gid_x232 = input_width - 1; if (_gid_y232 >= input_height) _gid_y232 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y232) * input_stride + _gid_x232); int _gid_x233 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y233 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x233 >= input_width) _gid_x233 = input_width - 1; if (_gid_y233 >= input_height) _gid_y233 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y233) * input_stride + _gid_x233); int _gid_x234 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y234 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x234 >= input_width) _gid_x234 = input_width - 1; if (_gid_y234 >= input_height) _gid_y234 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y234) * input_stride + _gid_x234); int _gid_x235 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y235 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x235 >= input_width) _gid_x235 = input_width - 1; if (_gid_y235 >= input_height) _gid_y235 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y235) * input_stride + _gid_x235); int _gid_x236 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y236 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x236 >= input_width) _gid_x236 = input_width - 1; if (_gid_y236 >= input_height) _gid_y236 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y236) * input_stride + _gid_x236); int _gid_x237 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y237 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x237 >= input_width) _gid_x237 = input_width - 1; if (_gid_y237 >= input_height) _gid_y237 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, 
(_gid_y237) * input_stride + _gid_x237); int _gid_x238 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y238 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x238 >= input_width) _gid_x238 = input_width - 1; if (_gid_y238 >= input_height) _gid_y238 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y238) * input_stride + _gid_x238); int _gid_x239 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y239 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x239 >= input_width) _gid_x239 = input_width - 1; if (_gid_y239 >= input_height) _gid_y239 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y239) * input_stride + _gid_x239); int _gid_x240 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y240 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x240 >= input_width) _gid_x240 = input_width - 1; if (_gid_y240 >= input_height) _gid_y240 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y240) * input_stride + _gid_x240); int _gid_x241 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y241 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x241 >= input_width) _gid_x241 = input_width - 1; if (_gid_y241 >= input_height) _gid_y241 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y241) * input_stride + _gid_x241); int _gid_x242 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y242 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x242 >= input_width) _gid_x242 = input_width - 1; if (_gid_y242 >= input_height) _gid_y242 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y242) * input_stride + _gid_x242); int _gid_x243 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y243 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x243 >= input_width) _gid_x243 = input_width - 1; if (_gid_y243 >= input_height) _gid_y243 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y243) * input_stride + _gid_x243); int _gid_x244 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y244 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x244 >= input_width) _gid_x244 = input_width - 1; if (_gid_y244 >= input_height) _gid_y244 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y244) * input_stride + _gid_x244); int _gid_x245 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y245 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x245 >= input_width) _gid_x245 = input_width - 1; if (_gid_y245 >= input_height) _gid_y245 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y245) * input_stride + _gid_x245); int _gid_x246 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y246 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x246 >= input_width) _gid_x246 = input_width - 1; if (_gid_y246 >= input_height) _gid_y246 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y246) * input_stride + _gid_x246); int _gid_x247 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y247 = gid_y + 9 * (int)blockDim.y 
+ (-2); if (_gid_x247 >= input_width) _gid_x247 = input_width - 1; if (_gid_y247 >= input_height) _gid_y247 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y247) * input_stride + _gid_x247); int _gid_x248 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y248 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x248 >= input_width) _gid_x248 = input_width - 1; if (_gid_y248 >= input_height) _gid_y248 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y248) * input_stride + _gid_x248); int _gid_x249 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y249 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x249 >= input_width) _gid_x249 = input_width - 1; if (_gid_y249 >= input_height) _gid_y249 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y249) * input_stride + _gid_x249); int _gid_x250 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y250 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x250 >= input_width) _gid_x250 = input_width - 1; if (_gid_y250 >= input_height) _gid_y250 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y250) * input_stride + _gid_x250); int _gid_x251 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y251 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x251 >= input_width) _gid_x251 = input_width - 1; if (_gid_y251 >= input_height) _gid_y251 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y251) * input_stride + _gid_x251); int _gid_x252 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y252 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x252 >= input_width) _gid_x252 = input_width - 1; if (_gid_y252 >= input_height) _gid_y252 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y252) * input_stride + _gid_x252); int _gid_x253 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y253 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x253 >= input_width) _gid_x253 = input_width - 1; if (_gid_y253 >= input_height) _gid_y253 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y253) * input_stride + _gid_x253); int _gid_x254 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y254 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x254 >= input_width) _gid_x254 = input_width - 1; if (_gid_y254 >= input_height) _gid_y254 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y254) * input_stride + _gid_x254); int _gid_x255 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y255 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x255 >= input_width) _gid_x255 = input_width - 1; if (_gid_y255 >= input_height) _gid_y255 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y255) * input_stride + _gid_x255); __syncthreads(); if (gid_x < iter_width) { if (gid_y < iter_height) { float _tmp256 = 0.F; { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0173000004F 
* _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp256 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp256 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp256 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp256 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp257 = 0.F; { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 
0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp257 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp257 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp257 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp257 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp258 = 0.F; { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.136565F * _smeminput[(int)threadIdx.y + 
2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp258 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp258 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp258 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp258 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp259 = 0.F; { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x 
+ -2 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp259 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp259 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp259 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp259 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp260 = 0.F; { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp260 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp260 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp260 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp260 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp261 = 0.F; { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp261 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * 
(int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp261 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp261 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp261 + 0.5F); } } if (gid_x < iter_width) { if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp262 = 0.F; { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp262 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp262 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp262 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp262 + 0.5F); } } if (gid_x < 
iter_width) { if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp263 = 0.F; { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp263 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp263 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp263 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp263 + 0.5F); } } } goto BH_EXIT; BH_B: { int _gid_x264 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y264 = gid_y + (-2); if (_gid_y264 >= input_height) _gid_y264 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y264) * input_stride + _gid_x264); int _gid_x265 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y265 = gid_y + (-2); if (_gid_y265 >= input_height) 
_gid_y265 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y265) * input_stride + _gid_x265); int _gid_x266 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y266 = gid_y + (-2); if (_gid_y266 >= input_height) _gid_y266 = input_height - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y266) * input_stride + _gid_x266); int _gid_x267 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y267 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y267 >= input_height) _gid_y267 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y267) * input_stride + _gid_x267); int _gid_x268 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y268 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y268 >= input_height) _gid_y268 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y268) * input_stride + _gid_x268); int _gid_x269 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y269 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_y269 >= input_height) _gid_y269 = input_height - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y269) * input_stride + _gid_x269); int _gid_x270 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y270 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y270 >= input_height) _gid_y270 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y270) * input_stride + _gid_x270); int _gid_x271 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y271 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y271 >= input_height) _gid_y271 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y271) * input_stride + _gid_x271); int _gid_x272 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y272 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_y272 >= input_height) _gid_y272 = input_height - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y272) * input_stride + _gid_x272); int _gid_x273 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y273 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y273 >= input_height) _gid_y273 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y273) * input_stride + _gid_x273); int _gid_x274 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y274 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y274 >= input_height) _gid_y274 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y274) * input_stride + _gid_x274); int _gid_x275 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y275 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_y275 >= input_height) _gid_y275 = input_height - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y275) * input_stride + _gid_x275); int _gid_x276 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y276 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y276 >= input_height) _gid_y276 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * 
(int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y276) * input_stride + _gid_x276); int _gid_x277 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y277 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y277 >= input_height) _gid_y277 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y277) * input_stride + _gid_x277); int _gid_x278 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y278 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_y278 >= input_height) _gid_y278 = input_height - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y278) * input_stride + _gid_x278); int _gid_x279 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y279 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y279 >= input_height) _gid_y279 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y279) * input_stride + _gid_x279); int _gid_x280 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y280 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y280 >= input_height) _gid_y280 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y280) * input_stride + _gid_x280); int _gid_x281 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y281 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_y281 >= input_height) _gid_y281 = input_height - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y281) * input_stride + _gid_x281); int _gid_x282 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y282 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y282 >= input_height) _gid_y282 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y282) * input_stride + _gid_x282); int _gid_x283 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y283 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y283 >= input_height) _gid_y283 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y283) * input_stride + _gid_x283); int _gid_x284 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y284 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_y284 >= input_height) _gid_y284 = input_height - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y284) * input_stride + _gid_x284); int _gid_x285 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y285 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y285 >= input_height) _gid_y285 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y285) * input_stride + _gid_x285); int _gid_x286 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y286 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y286 >= input_height) _gid_y286 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y286) * input_stride + _gid_x286); int _gid_x287 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y287 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_y287 >= input_height) _gid_y287 = input_height - 1; _smeminput[(int)threadIdx.y + 7 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y287) * input_stride + _gid_x287); int _gid_x288 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y288 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y288 >= input_height) _gid_y288 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y288) * input_stride + _gid_x288); int _gid_x289 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y289 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y289 >= input_height) _gid_y289 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y289) * input_stride + _gid_x289); int _gid_x290 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y290 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_y290 >= input_height) _gid_y290 = input_height - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y290) * input_stride + _gid_x290); int _gid_x291 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y291 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y291 >= input_height) _gid_y291 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y291) * input_stride + _gid_x291); int _gid_x292 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y292 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y292 >= input_height) _gid_y292 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y292) * input_stride + _gid_x292); int _gid_x293 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y293 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_y293 >= input_height) _gid_y293 = input_height - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y293) * input_stride + _gid_x293); int _gid_x294 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y294 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y294 >= input_height) _gid_y294 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y294) * input_stride + _gid_x294); int _gid_x295 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y295 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y295 >= input_height) _gid_y295 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y295) * input_stride + _gid_x295); int _gid_x296 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y296 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_y296 >= input_height) _gid_y296 = input_height - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y296) * input_stride + _gid_x296); int _gid_x297 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y297 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y297 >= input_height) _gid_y297 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y297) * input_stride + _gid_x297); int _gid_x298 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y298 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y298 >= input_height) _gid_y298 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * 
(int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y298) * input_stride + _gid_x298); int _gid_x299 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y299 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_y299 >= input_height) _gid_y299 = input_height - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y299) * input_stride + _gid_x299); __syncthreads(); if (gid_y < iter_height) { float _tmp300 = 0.F; { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp300 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp300 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp300 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp300 + 0.5F); } if (gid_y + 1 * (int)blockDim.y < iter_height) { float _tmp301 = 0.F; { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { 
_tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp301 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp301 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp301 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp301 + 0.5F); } if (gid_y + 2 * (int)blockDim.y < iter_height) { float _tmp302 = 0.F; { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * 
(int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp302 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp302 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp302 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp302 + 0.5F); } if (gid_y + 3 * (int)blockDim.y < iter_height) { float _tmp303 = 0.F; { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 
0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp303 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp303 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp303 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp303 + 0.5F); } if (gid_y + 4 * (int)blockDim.y < iter_height) { float _tmp304 = 0.F; { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 
2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp304 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp304 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp304 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp304 + 0.5F); } if (gid_y + 5 * (int)blockDim.y < iter_height) { float _tmp305 = 0.F; { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp305 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp305 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp305 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp305 + 0.5F); } if (gid_y + 6 * (int)blockDim.y < iter_height) { float _tmp306 = 0.F; { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + -1 + 32]; } { _tmp306 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp306 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp306 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp306 + 0.5F); } if (gid_y + 7 * (int)blockDim.y < iter_height) { float _tmp307 = 0.F; { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp307 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp307 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp307 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * 
iter_stride + gid_x] = (uchar)(_tmp307 + 0.5F); } } goto BH_EXIT; BH_R: { int _gid_x308 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y308 = gid_y + (-2); if (_gid_x308 >= input_width) _gid_x308 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y308) * input_stride + _gid_x308); int _gid_x309 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y309 = gid_y + (-2); if (_gid_x309 >= input_width) _gid_x309 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y309) * input_stride + _gid_x309); int _gid_x310 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y310 = gid_y + (-2); if (_gid_x310 >= input_width) _gid_x310 = input_width - 1; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y310) * input_stride + _gid_x310); int _gid_x311 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y311 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x311 >= input_width) _gid_x311 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y311) * input_stride + _gid_x311); int _gid_x312 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y312 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x312 >= input_width) _gid_x312 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y312) * input_stride + _gid_x312); int _gid_x313 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y313 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x313 >= input_width) _gid_x313 = input_width - 1; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y313) * input_stride + _gid_x313); int _gid_x314 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y314 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x314 >= input_width) _gid_x314 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y314) * input_stride + _gid_x314); int _gid_x315 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y315 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x315 >= input_width) _gid_x315 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y315) * input_stride + _gid_x315); int _gid_x316 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y316 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x316 >= input_width) _gid_x316 = input_width - 1; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y316) * input_stride + _gid_x316); int _gid_x317 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y317 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x317 >= input_width) _gid_x317 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y317) * input_stride + _gid_x317); int _gid_x318 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y318 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x318 >= input_width) _gid_x318 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y318) * input_stride + _gid_x318); int _gid_x319 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y319 = gid_y + 3 * (int)blockDim.y + (-2); 
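// (BH_R) x indices past the right edge are clamped to input_width - 1 before each tex1Dfetch,
// so the shared tile _smeminput is padded with the last valid image column.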
if (_gid_x319 >= input_width) _gid_x319 = input_width - 1; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y319) * input_stride + _gid_x319); int _gid_x320 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y320 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x320 >= input_width) _gid_x320 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y320) * input_stride + _gid_x320); int _gid_x321 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y321 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x321 >= input_width) _gid_x321 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y321) * input_stride + _gid_x321); int _gid_x322 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y322 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x322 >= input_width) _gid_x322 = input_width - 1; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y322) * input_stride + _gid_x322); int _gid_x323 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y323 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x323 >= input_width) _gid_x323 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y323) * input_stride + _gid_x323); int _gid_x324 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y324 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x324 >= input_width) _gid_x324 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y324) * input_stride + _gid_x324); int _gid_x325 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y325 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x325 >= input_width) _gid_x325 = input_width - 1; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y325) * input_stride + _gid_x325); int _gid_x326 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y326 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x326 >= input_width) _gid_x326 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y326) * input_stride + _gid_x326); int _gid_x327 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y327 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x327 >= input_width) _gid_x327 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y327) * input_stride + _gid_x327); int _gid_x328 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y328 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x328 >= input_width) _gid_x328 = input_width - 1; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y328) * input_stride + _gid_x328); int _gid_x329 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y329 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x329 >= input_width) _gid_x329 = input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y329) * input_stride + _gid_x329); int _gid_x330 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y330 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x330 >= input_width) _gid_x330 = 
input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y330) * input_stride + _gid_x330); int _gid_x331 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y331 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x331 >= input_width) _gid_x331 = input_width - 1; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y331) * input_stride + _gid_x331); int _gid_x332 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y332 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x332 >= input_width) _gid_x332 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y332) * input_stride + _gid_x332); int _gid_x333 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y333 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x333 >= input_width) _gid_x333 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y333) * input_stride + _gid_x333); int _gid_x334 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y334 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x334 >= input_width) _gid_x334 = input_width - 1; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y334) * input_stride + _gid_x334); int _gid_x335 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y335 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x335 >= input_width) _gid_x335 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y335) * input_stride + _gid_x335); int _gid_x336 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y336 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x336 >= input_width) _gid_x336 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y336) * input_stride + _gid_x336); int _gid_x337 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y337 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x337 >= input_width) _gid_x337 = input_width - 1; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y337) * input_stride + _gid_x337); int _gid_x338 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y338 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x338 >= input_width) _gid_x338 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y338) * input_stride + _gid_x338); int _gid_x339 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y339 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x339 >= input_width) _gid_x339 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y339) * input_stride + _gid_x339); int _gid_x340 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y340 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x340 >= input_width) _gid_x340 = input_width - 1; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y340) * input_stride + _gid_x340); int _gid_x341 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y341 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x341 >= input_width) _gid_x341 = input_width - 1; 
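// Once the last halo rows are staged, __syncthreads() makes the tile visible block-wide; each
// thread then applies the 5x5 Gaussian weights to its eight output rows, with the stores
// guarded by gid_x < iter_width near the right border.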
_smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y341) * input_stride + _gid_x341); int _gid_x342 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y342 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x342 >= input_width) _gid_x342 = input_width - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y342) * input_stride + _gid_x342); int _gid_x343 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y343 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x343 >= input_width) _gid_x343 = input_width - 1; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y343) * input_stride + _gid_x343); __syncthreads(); if (gid_x < iter_width) { { float _tmp344 = 0.F; { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp344 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp344 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp344 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp344 + 0.5F); } } if (gid_x < iter_width) { { float _tmp345 = 0.F; { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp345 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp345 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp345 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp345 + 0.5F); } } if (gid_x < iter_width) { { float _tmp346 = 0.F; { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; 
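// The same 25-tap Gaussian-weighted sum is repeated for the remaining row offsets; each
// result is rounded by adding 0.5F and stored to iter as uchar.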
} { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp346 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp346 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp346 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp346 + 0.5F); } } if (gid_x < iter_width) { { float _tmp347 = 0.F; { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp347 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp347 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp347 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp347 + 0.5F); } } if (gid_x < iter_width) { { float _tmp348 = 0.F; { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.136565F * _smeminput[(int)threadIdx.y + 4 
* (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp348 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp348 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp348 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp348 + 0.5F); } } if (gid_x < iter_width) { { float _tmp349 = 0.F; { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0597609989F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp349 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp349 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp349 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp349 + 0.5F); } } if (gid_x < iter_width) { { float _tmp350 = 0.F; { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { 
_tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp350 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp350 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp350 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp350 + 0.5F); } } if (gid_x < iter_width) { { float _tmp351 = 0.F; { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp351 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp351 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 
2][(int)threadIdx.x + 1 + 32]; } { _tmp351 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp351 + 0.5F); } } } goto BH_EXIT; BH_L: { int _gid_x352 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y352 = gid_y + (-2); if (_gid_x352 < 0) _gid_x352 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y352) * input_stride + _gid_x352); int _gid_x353 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y353 = gid_y + (-2); if (_gid_x353 < 0) _gid_x353 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y353) * input_stride + _gid_x353); int _gid_x354 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y354 = gid_y + (-2); if (_gid_x354 < 0) _gid_x354 = 0; _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y354) * input_stride + _gid_x354); int _gid_x355 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y355 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x355 < 0) _gid_x355 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y355) * input_stride + _gid_x355); int _gid_x356 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y356 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x356 < 0) _gid_x356 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y356) * input_stride + _gid_x356); int _gid_x357 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y357 = gid_y + 1 * (int)blockDim.y + (-2); if (_gid_x357 < 0) _gid_x357 = 0; _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y357) * input_stride + _gid_x357); int _gid_x358 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y358 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x358 < 0) _gid_x358 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y358) * input_stride + _gid_x358); int _gid_x359 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y359 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x359 < 0) _gid_x359 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y359) * input_stride + _gid_x359); int _gid_x360 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y360 = gid_y + 2 * (int)blockDim.y + (-2); if (_gid_x360 < 0) _gid_x360 = 0; _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y360) * input_stride + _gid_x360); int _gid_x361 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y361 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x361 < 0) _gid_x361 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y361) * input_stride + _gid_x361); int _gid_x362 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y362 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x362 < 0) _gid_x362 = 0; _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y362) * input_stride + _gid_x362); int _gid_x363 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y363 = gid_y + 3 * (int)blockDim.y + (-2); if (_gid_x363 < 0) _gid_x363 = 0; _smeminput[(int)threadIdx.y + 3 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y363) * input_stride + _gid_x363); int _gid_x364 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y364 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x364 < 0) _gid_x364 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y364) * input_stride + _gid_x364); int _gid_x365 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y365 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x365 < 0) _gid_x365 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y365) * input_stride + _gid_x365); int _gid_x366 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y366 = gid_y + 4 * (int)blockDim.y + (-2); if (_gid_x366 < 0) _gid_x366 = 0; _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y366) * input_stride + _gid_x366); int _gid_x367 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y367 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x367 < 0) _gid_x367 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y367) * input_stride + _gid_x367); int _gid_x368 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y368 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x368 < 0) _gid_x368 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y368) * input_stride + _gid_x368); int _gid_x369 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y369 = gid_y + 5 * (int)blockDim.y + (-2); if (_gid_x369 < 0) _gid_x369 = 0; _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y369) * input_stride + _gid_x369); int _gid_x370 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y370 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x370 < 0) _gid_x370 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y370) * input_stride + _gid_x370); int _gid_x371 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y371 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x371 < 0) _gid_x371 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y371) * input_stride + _gid_x371); int _gid_x372 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y372 = gid_y + 6 * (int)blockDim.y + (-2); if (_gid_x372 < 0) _gid_x372 = 0; _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y372) * input_stride + _gid_x372); int _gid_x373 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y373 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x373 < 0) _gid_x373 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y373) * input_stride + _gid_x373); int _gid_x374 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y374 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x374 < 0) _gid_x374 = 0; _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y374) * input_stride + _gid_x374); int _gid_x375 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y375 = gid_y + 7 * (int)blockDim.y + (-2); if (_gid_x375 < 0) _gid_x375 = 0; _smeminput[(int)threadIdx.y + 7 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y375) * input_stride + _gid_x375); int _gid_x376 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y376 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x376 < 0) _gid_x376 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y376) * input_stride + _gid_x376); int _gid_x377 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y377 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x377 < 0) _gid_x377 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y377) * input_stride + _gid_x377); int _gid_x378 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y378 = gid_y + 8 * (int)blockDim.y + (-2); if (_gid_x378 < 0) _gid_x378 = 0; _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y378) * input_stride + _gid_x378); int _gid_x379 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y379 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x379 < 0) _gid_x379 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y379) * input_stride + _gid_x379); int _gid_x380 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y380 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x380 < 0) _gid_x380 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y380) * input_stride + _gid_x380); int _gid_x381 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y381 = gid_y + 9 * (int)blockDim.y + (-2); if (_gid_x381 < 0) _gid_x381 = 0; _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y381) * input_stride + _gid_x381); int _gid_x382 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y382 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x382 < 0) _gid_x382 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y382) * input_stride + _gid_x382); int _gid_x383 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y383 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x383 < 0) _gid_x383 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y383) * input_stride + _gid_x383); int _gid_x384 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y384 = gid_y + 10 * (int)blockDim.y + (-2); if (_gid_x384 < 0) _gid_x384 = 0; _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y384) * input_stride + _gid_x384); int _gid_x385 = gid_x + 0 * (int)blockDim.x - 32; int _gid_y385 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x385 < 0) _gid_x385 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y385) * input_stride + _gid_x385); int _gid_x386 = gid_x + 1 * (int)blockDim.x - 32; int _gid_y386 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x386 < 0) _gid_x386 = 0; _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y386) * input_stride + _gid_x386); int _gid_x387 = gid_x + 2 * (int)blockDim.x - 32; int _gid_y387 = gid_y + 11 * (int)blockDim.y + (-2); if (_gid_x387 < 0) _gid_x387 = 0; _smeminput[(int)threadIdx.y + 11 * 
(int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (_gid_y387) * input_stride + _gid_x387); __syncthreads(); { float _tmp388 = 0.F; { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp388 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp388 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp388 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp388 + 0.5F); } { float _tmp389 = 0.F; { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0903389975F * 
_smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp389 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp389 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp389 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp389 + 0.5F); } { float _tmp390 = 0.F; { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0903389975F 
* _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp390 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp390 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp390 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp390 + 0.5F); } { float _tmp391 = 0.F; { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp391 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp391 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp391 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp391 + 0.5F); } { float _tmp392 = 0.F; { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp392 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp392 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp392 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp392 + 0.5F); } { float _tmp393 = 0.F; { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp393 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp393 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp393 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp393 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp393 + 0.5F); } { float _tmp394 = 0.F; { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp394 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp394 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp394 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp394 + 0.5F); } { float _tmp395 = 0.F; { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0173000004F * 
_smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp395 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp395 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp395 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp395 + 0.5F); } } goto BH_EXIT; BH_NO: { _smeminput[(int)threadIdx.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * 
(int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 1 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 2 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 3 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 4 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 5 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 6 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * 
(int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 7 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 8 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 8 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 9 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 9 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 10 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 10 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 0 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 0 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 1 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 1 * (int)blockDim.x - 32); _smeminput[(int)threadIdx.y + 11 * (int)blockDim.y][(int)threadIdx.x + 2 * (int)blockDim.x] = tex1Dfetch(_texinputXY, (gid_y + 11 * (int)blockDim.y + (-2)) * input_stride + gid_x + 2 * (int)blockDim.x - 32); __syncthreads(); { float _tmp396 = 0.F; { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 1 + 32]; 
} { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.136565F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp396 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp396 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp396 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y) * iter_stride + gid_x] = (uchar)(_tmp396 + 0.5F); } { float _tmp397 = 0.F; { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { 
_tmp397 += 0.136565F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0903389975F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0597609989F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp397 += 0.0261509996F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp397 += 0.0173000004F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp397 += 0.00500799995F * _smeminput[(int)threadIdx.y + 1 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp397 + 0.5F); } { float _tmp398 = 0.F; { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.136565F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { 
_tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0903389975F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0597609989F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp398 += 0.0261509996F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp398 += 0.0173000004F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp398 += 0.00500799995F * _smeminput[(int)threadIdx.y + 2 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp398 + 0.5F); } { float _tmp399 = 0.F; { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.136565F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0903389975F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0597609989F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { 
_tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp399 += 0.0261509996F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp399 += 0.0173000004F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp399 += 0.00500799995F * _smeminput[(int)threadIdx.y + 3 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp399 + 0.5F); } { float _tmp400 = 0.F; { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.136565F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0903389975F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0597609989F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp400 += 0.0261509996F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp400 += 0.0173000004F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { 
_tmp400 += 0.00500799995F * _smeminput[(int)threadIdx.y + 4 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp400 + 0.5F); } { float _tmp401 = 0.F; { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.136565F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0903389975F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0597609989F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp401 += 0.0261509996F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp401 += 0.0173000004F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp401 += 0.00500799995F * _smeminput[(int)threadIdx.y + 5 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp401 + 0.5F); } { float _tmp402 = 0.F; { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { 
_tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.136565F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0903389975F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0597609989F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp402 += 0.0261509996F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp402 += 0.0173000004F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp402 += 0.00500799995F * _smeminput[(int)threadIdx.y + 6 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp402 + 0.5F); } { float _tmp403 = 0.F; { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -2 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -2 + 32]; 
} { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + -1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.136565F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 0 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0903389975F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0597609989F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 1 + 2][(int)threadIdx.x + 2 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -2 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + -1 + 32]; } { _tmp403 += 0.0261509996F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 0 + 32]; } { _tmp403 += 0.0173000004F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 1 + 32]; } { _tmp403 += 0.00500799995F * _smeminput[(int)threadIdx.y + 7 * (int)blockDim.y + 2 + 2][(int)threadIdx.x + 2 + 32]; } iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (uchar)(_tmp403 + 0.5F); } } goto BH_EXIT; BH_EXIT: ; } } #endif //_CUGAUSSIANFILTERXY_CU_
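The generated kernel above is a fully unrolled 5x5 Gaussian convolution: each output pixel is a weighted sum of 25 neighbours using the normalized coefficients 0.005008 ... 0.136565, rounded to uchar with +0.5f, with the input staged through a shared-memory tile and separate goto branches for border handling. As a reading aid, here is a minimal, non-unrolled sketch of the same interior computation; the mask name, image-pointer names and the plain global-memory indexing are illustrative assumptions, not the generated kernel's actual interface.

// Readable sketch of the 5x5 Gaussian blur the unrolled kernel above computes.
// Interior pixels only; the generated code additionally tiles the input in
// shared memory and special-cases the image borders.
__constant__ float gauss5x5[5][5];  // same normalized coefficients as above

__global__ void gaussianBlur5x5(const unsigned char *in, unsigned char *out,
                                int width, int height, int stride) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < 2 || y < 2 || x >= width - 2 || y >= height - 2) return;

  float sum = 0.0f;
  for (int dy = -2; dy <= 2; ++dy)    // 5 mask rows
    for (int dx = -2; dx <= 2; ++dx)  // 5 mask columns
      sum += gauss5x5[dy + 2][dx + 2] * in[(y + dy) * stride + (x + dx)];

  out[y * stride + x] = (unsigned char)(sum + 0.5f);  // round to nearest, as above
}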
b7857bdc5b46548cb70161d66e9eafd20b0ed018.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPDX-FileCopyrightText: 2020 CERN // SPDX-License-Identifier: Apache-2.0 #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <iostream> #include <AdePT/BlockData.h> using Queue_t = adept::mpmc_bounded_queue<int>; struct MyTrack { int index{0}; int pdg{0}; double energy{10}; double pos[3]{0}; double dir[3]{1}; bool flag1; bool flag2; }; struct Scoring { adept::Atomic_t<int> secondaries; adept::Atomic_t<float> totalEnergyLoss; __host__ __device__ Scoring() {} __host__ __device__ static Scoring *MakeInstanceAt(void *addr) { Scoring *obj = new (addr) Scoring(); return obj; } }; // kernel function that does transportation __global__ void transport(int n, adept::BlockData<MyTrack> *block, hiprandState_t *states, Queue_t *queues) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { // transport particles for (int xyz = 0; xyz < 3; xyz++) { (*block)[i].pos[xyz] = (*block)[i].pos[xyz] + (*block)[i].energy * (*block)[i].dir[xyz]; } } } // kernel function that assigns next process to the particle __global__ void select_process(adept::BlockData<MyTrack> *block, Scoring *scor, hiprandState_t *states, Queue_t *queues[]) { int particle_index = blockIdx.x * blockDim.x + threadIdx.x; // check if you are not outside the used block if (particle_index > block->GetNused() + block->GetNholes()) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // generate random number float r = hiprand_uniform(states); if (r > 0.5f) { queues[0]->enqueue(particle_index); } else { queues[1]->enqueue(particle_index); } } // kernel function that does energy loss __global__ void process_eloss(int n, adept::BlockData<MyTrack> *block, Scoring *scor, hiprandState_t *states, Queue_t *queue) { int particle_index; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (!queue->dequeue(particle_index)) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // call the 'process' // energy loss float eloss = 0.2f * (*block)[particle_index].energy; scor->totalEnergyLoss.fetch_add(eloss < 0.001f ? (*block)[particle_index].energy : eloss); (*block)[particle_index].energy = (eloss < 0.001f ? 
0.0f : ((*block)[particle_index].energy - eloss)); // if particle dies (E=0) release the slot if ((*block)[particle_index].energy < 0.001f) block->ReleaseElement(particle_index); } } // kernel function that does pair production __global__ void process_pairprod(int n, adept::BlockData<MyTrack> *block, Scoring *scor, hiprandState_t *states, Queue_t *queue) { int particle_index; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (!queue->dequeue(particle_index)) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // pair production auto secondary_track = block->NextElement(); assert(secondary_track != nullptr && "No slot available for secondary track"); float eloss = 0.5f * (*block)[particle_index].energy; (*block)[particle_index].energy -= eloss; secondary_track->energy = eloss; // increase the counter of secondaries scor->secondaries.fetch_add(1); } } /* this GPU kernel function is used to initialize the random states */ __global__ void init(hiprandState_t *states) { /* we have to initialize the state */ hiprand_init(0, 0, 0, states); } // int main() { hiprandState_t *state; hipMalloc((void **)&state, sizeof(hiprandState_t)); hipLaunchKernelGGL(( init), dim3(1), dim3(1), 0, 0, state); hipDeviceSynchronize(); // Capacity of the different containers constexpr int capacity = 1 << 20; using Queue_t = adept::mpmc_bounded_queue<int>; constexpr int numberOfProcesses = 3; char *buffer[numberOfProcesses]; Queue_t **queues = nullptr; hipMallocManaged(&queues, numberOfProcesses * sizeof(Queue_t *)); size_t buffersize = Queue_t::SizeOfInstance(capacity); for (int i = 0; i < numberOfProcesses; i++) { buffer[i] = nullptr; hipMallocManaged(&buffer[i], buffersize); queues[i] = Queue_t::MakeInstanceAt(capacity, buffer[i]); } // Allocate the content of Scoring in a buffer char *buffer_scor = nullptr; hipMallocManaged(&buffer_scor, sizeof(Scoring)); Scoring *scor = Scoring::MakeInstanceAt(buffer_scor); // Initialize scoring scor->secondaries = 0; scor->totalEnergyLoss = 0; // Allocate a block of tracks with capacity larger than the total number of spawned threads // Note that if we want to allocate several consecutive block in a buffer, we have to use // Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block using Block_t = adept::BlockData<MyTrack>; size_t blocksize = Block_t::SizeOfInstance(capacity); char *buffer2 = nullptr; hipMallocManaged(&buffer2, blocksize); auto block = Block_t::MakeInstanceAt(capacity, buffer2); // initializing one track in the block auto track = block->NextElement(); track->energy = 100.0f; // initializing second track in the block auto track2 = block->NextElement(); track2->energy = 30.0f; constexpr dim3 nthreads(32); constexpr dim3 maxBlocks(10); dim3 numBlocks, numBlocks_eloss, numBlocks_pairprod, numBlocks_transport; while (block->GetNused()) { numBlocks.x = (block->GetNused() + block->GetNholes() + nthreads.x - 1) / nthreads.x; // here I set the maximum number of blocks numBlocks_transport.x = ::min(numBlocks.x, maxBlocks.x); hipLaunchKernelGGL(( transport), dim3(numBlocks_transport), dim3(nthreads), 0, 0, queues[2]->size(), block, state, queues[2]); // call the kernel to select the process hipLaunchKernelGGL(( select_process), dim3(numBlocks), dim3(nthreads), 0, 0, block, scor, state, queues); hipDeviceSynchronize(); // call the process kernels numBlocks_eloss.x = ::min((queues[0]->size() + nthreads.x - 1) / nthreads.x, maxBlocks.x); 
numBlocks_pairprod.x = ::min((queues[1]->size() + nthreads.x - 1) / nthreads.x, maxBlocks.x); hipLaunchKernelGGL(( process_eloss), dim3(numBlocks_eloss), dim3(nthreads), 0, 0, queues[0]->size(), block, scor, state, queues[0]); hipLaunchKernelGGL(( process_pairprod), dim3(numBlocks_pairprod), dim3(nthreads), 0, 0, queues[1]->size(), block, scor, state, queues[1]); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); std::cout << "Total energy loss " << scor->totalEnergyLoss.load() << " number of secondaries " << scor->secondaries.load() << " blocks used " << block->GetNused() << std::endl; } }
b7857bdc5b46548cb70161d66e9eafd20b0ed018.cu
// SPDX-FileCopyrightText: 2020 CERN // SPDX-License-Identifier: Apache-2.0 #include <curand.h> #include <curand_kernel.h> #include <iostream> #include <AdePT/BlockData.h> using Queue_t = adept::mpmc_bounded_queue<int>; struct MyTrack { int index{0}; int pdg{0}; double energy{10}; double pos[3]{0}; double dir[3]{1}; bool flag1; bool flag2; }; struct Scoring { adept::Atomic_t<int> secondaries; adept::Atomic_t<float> totalEnergyLoss; __host__ __device__ Scoring() {} __host__ __device__ static Scoring *MakeInstanceAt(void *addr) { Scoring *obj = new (addr) Scoring(); return obj; } }; // kernel function that does transportation __global__ void transport(int n, adept::BlockData<MyTrack> *block, curandState_t *states, Queue_t *queues) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { // transport particles for (int xyz = 0; xyz < 3; xyz++) { (*block)[i].pos[xyz] = (*block)[i].pos[xyz] + (*block)[i].energy * (*block)[i].dir[xyz]; } } } // kernel function that assigns next process to the particle __global__ void select_process(adept::BlockData<MyTrack> *block, Scoring *scor, curandState_t *states, Queue_t *queues[]) { int particle_index = blockIdx.x * blockDim.x + threadIdx.x; // check if you are not outside the used block if (particle_index > block->GetNused() + block->GetNholes()) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // generate random number float r = curand_uniform(states); if (r > 0.5f) { queues[0]->enqueue(particle_index); } else { queues[1]->enqueue(particle_index); } } // kernel function that does energy loss __global__ void process_eloss(int n, adept::BlockData<MyTrack> *block, Scoring *scor, curandState_t *states, Queue_t *queue) { int particle_index; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (!queue->dequeue(particle_index)) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // call the 'process' // energy loss float eloss = 0.2f * (*block)[particle_index].energy; scor->totalEnergyLoss.fetch_add(eloss < 0.001f ? (*block)[particle_index].energy : eloss); (*block)[particle_index].energy = (eloss < 0.001f ? 
0.0f : ((*block)[particle_index].energy - eloss)); // if particle dies (E=0) release the slot if ((*block)[particle_index].energy < 0.001f) block->ReleaseElement(particle_index); } } // kernel function that does pair production __global__ void process_pairprod(int n, adept::BlockData<MyTrack> *block, Scoring *scor, curandState_t *states, Queue_t *queue) { int particle_index; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (!queue->dequeue(particle_index)) return; // check if the particle is still alive (E>0) if ((*block)[particle_index].energy == 0) return; // pair production auto secondary_track = block->NextElement(); assert(secondary_track != nullptr && "No slot available for secondary track"); float eloss = 0.5f * (*block)[particle_index].energy; (*block)[particle_index].energy -= eloss; secondary_track->energy = eloss; // increase the counter of secondaries scor->secondaries.fetch_add(1); } } /* this GPU kernel function is used to initialize the random states */ __global__ void init(curandState_t *states) { /* we have to initialize the state */ curand_init(0, 0, 0, states); } // int main() { curandState_t *state; cudaMalloc((void **)&state, sizeof(curandState_t)); init<<<1, 1>>>(state); cudaDeviceSynchronize(); // Capacity of the different containers constexpr int capacity = 1 << 20; using Queue_t = adept::mpmc_bounded_queue<int>; constexpr int numberOfProcesses = 3; char *buffer[numberOfProcesses]; Queue_t **queues = nullptr; cudaMallocManaged(&queues, numberOfProcesses * sizeof(Queue_t *)); size_t buffersize = Queue_t::SizeOfInstance(capacity); for (int i = 0; i < numberOfProcesses; i++) { buffer[i] = nullptr; cudaMallocManaged(&buffer[i], buffersize); queues[i] = Queue_t::MakeInstanceAt(capacity, buffer[i]); } // Allocate the content of Scoring in a buffer char *buffer_scor = nullptr; cudaMallocManaged(&buffer_scor, sizeof(Scoring)); Scoring *scor = Scoring::MakeInstanceAt(buffer_scor); // Initialize scoring scor->secondaries = 0; scor->totalEnergyLoss = 0; // Allocate a block of tracks with capacity larger than the total number of spawned threads // Note that if we want to allocate several consecutive block in a buffer, we have to use // Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block using Block_t = adept::BlockData<MyTrack>; size_t blocksize = Block_t::SizeOfInstance(capacity); char *buffer2 = nullptr; cudaMallocManaged(&buffer2, blocksize); auto block = Block_t::MakeInstanceAt(capacity, buffer2); // initializing one track in the block auto track = block->NextElement(); track->energy = 100.0f; // initializing second track in the block auto track2 = block->NextElement(); track2->energy = 30.0f; constexpr dim3 nthreads(32); constexpr dim3 maxBlocks(10); dim3 numBlocks, numBlocks_eloss, numBlocks_pairprod, numBlocks_transport; while (block->GetNused()) { numBlocks.x = (block->GetNused() + block->GetNholes() + nthreads.x - 1) / nthreads.x; // here I set the maximum number of blocks numBlocks_transport.x = std::min(numBlocks.x, maxBlocks.x); transport<<<numBlocks_transport, nthreads>>>(queues[2]->size(), block, state, queues[2]); // call the kernel to select the process select_process<<<numBlocks, nthreads>>>(block, scor, state, queues); cudaDeviceSynchronize(); // call the process kernels numBlocks_eloss.x = std::min((queues[0]->size() + nthreads.x - 1) / nthreads.x, maxBlocks.x); numBlocks_pairprod.x = std::min((queues[1]->size() + nthreads.x - 1) / nthreads.x, maxBlocks.x); 
process_eloss<<<numBlocks_eloss, nthreads>>>(queues[0]->size(), block, scor, state, queues[0]); process_pairprod<<<numBlocks_pairprod, nthreads>>>(queues[1]->size(), block, scor, state, queues[1]); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); std::cout << "Total energy loss " << scor->totalEnergyLoss.load() << " number of secondaries " << scor->secondaries.load() << " blocks used " << block->GetNused() << std::endl; } }
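This pair (the .hip file and its .cu original) differs mainly in the cuRAND-to-hipRAND renames (curandState_t, curand_uniform and curand_init become the hiprand equivalents) and in kernel-launch syntax: hipify rewrites the CUDA triple-chevron launch into the portable hipLaunchKernelGGL macro. A minimal side-by-side sketch of that mapping, reusing the init kernel from the files above:

// CUDA launch and the HIP form emitted by hipify; the two extra macro
// arguments are the dynamic shared-memory size and the stream.
//
//   CUDA: init<<<1, 1>>>(state);
//   HIP : hipLaunchKernelGGL(init, dim3(1), dim3(1),
//                            0 /* shared memory */, 0 /* stream */, state);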
dae5262771147fb2a8b4de7c0f2996d58770ef48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parameters.h" #include "unified_buffer.h" #include "accumulators.h" Cell* sys_arr_host; __device__ Cell* sys_arr; void flush_sys_arr(); void sys_arr_free(); extern __device__ int cycle_count; extern int result_rowsize_host; extern int result_colsize_host; extern __device__ int result_rowsize; extern __device__ int result_colsize; extern __device__ char* result; void sys_arr_ini() { hipMalloc((void**)&sys_arr_host, sizeof(Cell) * sys_array_size * sys_array_size); hipMemcpyToSymbol(sys_arr, &sys_arr_host, sizeof(Cell*)); flush_sys_arr(); printf("Systolic array successfully initialized, size %d * %d\n", sys_array_size, sys_array_size); } void flush_sys_arr() { hipMemset(sys_arr_host, 0, sizeof(Cell) * sys_array_size * sys_array_size); } void sys_arr_free() { hipFree(sys_arr_host); } __global__ void _heart_beat() { int x; int y; if (blockIdx.x == 0) x = feed_data_h(blockIdx.y); else x = sys_arr[blockIdx.x - 1 + blockIdx.y * gridDim.x].x_output; if (blockIdx.y == 0) y = feed_data_v(blockIdx.x); else y = sys_arr[blockIdx.x + (blockIdx.y - 1) * gridDim.x].y_output; int16_t result = x * y; accumulate(blockIdx.x, blockIdx.y, result); sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x = x; sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y = y; } __global__ void _cell_update() { sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x_output = sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x; sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y_output = sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y; } __global__ void increase_count() { cycle_count++; } __global__ void reset_count() { cycle_count = 0; } void heart_beat() { hipLaunchKernelGGL(( _heart_beat) , dim3(grid), dim3(1) , 0, 0, ); hipDeviceSynchronize(); _cell_update << <grid, 1 >> > (); hipDeviceSynchronize(); increase_count << <1, 1 >> > (); }
dae5262771147fb2a8b4de7c0f2996d58770ef48.cu
#include "parameters.h" #include "unified_buffer.h" #include "accumulators.h" Cell* sys_arr_host; __device__ Cell* sys_arr; void flush_sys_arr(); void sys_arr_free(); extern __device__ int cycle_count; extern int result_rowsize_host; extern int result_colsize_host; extern __device__ int result_rowsize; extern __device__ int result_colsize; extern __device__ char* result; void sys_arr_ini() { cudaMalloc((void**)&sys_arr_host, sizeof(Cell) * sys_array_size * sys_array_size); cudaMemcpyToSymbol(sys_arr, &sys_arr_host, sizeof(Cell*)); flush_sys_arr(); printf("Systolic array successfully initialized, size %d * %d\n", sys_array_size, sys_array_size); } void flush_sys_arr() { cudaMemset(sys_arr_host, 0, sizeof(Cell) * sys_array_size * sys_array_size); } void sys_arr_free() { cudaFree(sys_arr_host); } __global__ void _heart_beat() { int x; int y; if (blockIdx.x == 0) x = feed_data_h(blockIdx.y); else x = sys_arr[blockIdx.x - 1 + blockIdx.y * gridDim.x].x_output; if (blockIdx.y == 0) y = feed_data_v(blockIdx.x); else y = sys_arr[blockIdx.x + (blockIdx.y - 1) * gridDim.x].y_output; int16_t result = x * y; accumulate(blockIdx.x, blockIdx.y, result); sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x = x; sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y = y; } __global__ void _cell_update() { sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x_output = sys_arr[blockIdx.x + blockIdx.y * gridDim.x].x; sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y_output = sys_arr[blockIdx.x + blockIdx.y * gridDim.x].y; } __global__ void increase_count() { cycle_count++; } __global__ void reset_count() { cycle_count = 0; } void heart_beat() { _heart_beat <<< grid, 1 >>> (); cudaDeviceSynchronize(); _cell_update << <grid, 1 >> > (); cudaDeviceSynchronize(); increase_count << <1, 1 >> > (); }
74ddd4f697e5287e32c1766b1b1e9b2d3953fecd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cudacpp\DeviceVector.h" #include "cudacpp\DeviceMemory.h" class IntProvider { public: virtual __device__ int getNumber() const = 0; }; class SimpleIntProvider : public IntProvider { int _i; public: __device__ __inline__ SimpleIntProvider(int i) : _i(i) {} __device__ int getNumber() const override { return _i; } }; class SumIntProvider : public IntProvider { int _a; int _b; public: __device__ __inline__ SumIntProvider(int a, int b) : _a(a), _b(b) {} __device__ int getNumber() const override { return _a + _b; } }; __device__ __inline__ void putValue(int& to, const IntProvider& ip) { to = ip.getNumber(); } __global__ void addKernel(cudacpp::DeviceVector<int> c, int val) { auto idx = threadIdx.x; //SimpleIntProvider sip{ val }; SumIntProvider sip{ val, 2 }; putValue(c[idx], sip); } int testVirtual(int size, int *c, int val) { hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) auto dev_c = cudacpp::DeviceMemory<int>::AllocateElements(size); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, val); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return cudaStatus; } // Copy output vector from GPU buffer to host memory. cudaStatus = CopyElements(c, dev_c, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return cudaStatus; } return cudaStatus; return 0; }
74ddd4f697e5287e32c1766b1b1e9b2d3953fecd.cu
#include "cuda_runtime.h" #include <stdio.h> #include "cudacpp\DeviceVector.h" #include "cudacpp\DeviceMemory.h" class IntProvider { public: virtual __device__ int getNumber() const = 0; }; class SimpleIntProvider : public IntProvider { int _i; public: __device__ __inline__ SimpleIntProvider(int i) : _i(i) {} __device__ int getNumber() const override { return _i; } }; class SumIntProvider : public IntProvider { int _a; int _b; public: __device__ __inline__ SumIntProvider(int a, int b) : _a(a), _b(b) {} __device__ int getNumber() const override { return _a + _b; } }; __device__ __inline__ void putValue(int& to, const IntProvider& ip) { to = ip.getNumber(); } __global__ void addKernel(cudacpp::DeviceVector<int> c, int val) { auto idx = threadIdx.x; //SimpleIntProvider sip{ val }; SumIntProvider sip{ val, 2 }; putValue(c[idx], sip); } int testVirtual(int size, int *c, int val) { cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) auto dev_c = cudacpp::DeviceMemory<int>::AllocateElements(size); // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, val); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return cudaStatus; } // Copy output vector from GPU buffer to host memory. cudaStatus = CopyElements(c, dev_c, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return cudaStatus; } return cudaStatus; return 0; }
4051b0468208df95c6f0a3340362740e9bf40d64.hip
// !!! This is a file automatically generated by hipify!!! /** * atax.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "./polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 #define GPU_DEVICE 0 /* Problem size. */ #define NX 32768 #define NY 32768 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 1024 #define DIM_THREAD_BLOCK_Y 1 #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX; } } } void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<NY; i++) { if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); //printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NX) { int j; for(j=0; j < NY; j++) { tmp[i] += A[i * NY + j] * x[j]; } } } __global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp) { int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < NY) { int i; for(i=0; i < NX; i++) { y[j] += A[i * NY + j] * tmp[i]; } } } void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i,j; for (i= 0; i < NY; i++) { y[i] = 0; } for (i = 0; i < NX; i++) { tmp[i] = 0; for (j = 0; j < NY; j++) { tmp[i] = tmp[i] + A[i*NY + j] * x[j]; } for (j = 0; j < NY; j++) { y[j] = y[j] + A[i*NY + j] * tmp[i]; } } } void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { /* DATA_TYPE *A_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY); hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY); hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY); hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX); */ hipStream_t streams[1]; hipStreamCreate(&(streams[0])); hipEvent_t start,stop; float elapsedTimeInMs = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /* hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice); hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice); hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice); */ dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1); dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1); hipMemPrefetchAsync(A, sizeof(DATA_TYPE) * NX * NY, 0,streams[0]); hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, A,x,tmp); hipDeviceSynchronize(); hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), 
dim3(block) , 0, 0, A,y,tmp); hipDeviceSynchronize(); //hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost); hipMemPrefetchAsync(y, sizeof(DATA_TYPE) * NX, hipCpuDeviceId,streams[0]); hipEventRecord(stop); hipDeviceSynchronize(); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTimeInMs, start, stop); fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs); /* hipFree(A_gpu); hipFree(x_gpu); hipFree(y_gpu); hipFree(tmp_gpu); */ } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; /* A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); */ /* hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, hipHostMallocPortable); hipHostMalloc((void **)&x, sizeof(DATA_TYPE) * NY, hipHostMallocPortable); hipHostMalloc((void **)&y, sizeof(DATA_TYPE) * NY, hipHostMallocPortable); hipHostMalloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, hipHostMallocPortable); hipHostMalloc((void **)&tmp, sizeof(DATA_TYPE) * NX, hipHostMallocPortable); */ hipMallocManaged((void **)&A, sizeof(DATA_TYPE) * NX * NY); hipMallocManaged((void **)&x, sizeof(DATA_TYPE) * NY); hipMallocManaged((void **)&y, sizeof(DATA_TYPE) * NY); hipMallocManaged((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY); hipMallocManaged((void **)&tmp, sizeof(DATA_TYPE) * NX); init_array(x, A); GPU_argv_init(); ataxGpu(A, x, y, tmp, y_outputFromGpu); /* t_start = rtclock(); atax_cpu(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); */ hipFree(A); hipFree(x); hipFree(y); hipFree(y_outputFromGpu); hipFree(tmp); return 0; }
4051b0468208df95c6f0a3340362740e9bf40d64.cu
/** * atax.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #include "./polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 #define GPU_DEVICE 0 /* Problem size. */ #define NX 32768 #define NY 32768 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 1024 #define DIM_THREAD_BLOCK_Y 1 #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX; } } } void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<NY; i++) { if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); //printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NX) { int j; for(j=0; j < NY; j++) { tmp[i] += A[i * NY + j] * x[j]; } } } __global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp) { int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < NY) { int i; for(i=0; i < NX; i++) { y[j] += A[i * NY + j] * tmp[i]; } } } void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i,j; for (i= 0; i < NY; i++) { y[i] = 0; } for (i = 0; i < NX; i++) { tmp[i] = 0; for (j = 0; j < NY; j++) { tmp[i] = tmp[i] + A[i*NY + j] * x[j]; } for (j = 0; j < NY; j++) { y[j] = y[j] + A[i*NY + j] * tmp[i]; } } } void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { /* DATA_TYPE *A_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY); cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY); cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY); cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX); */ cudaStream_t streams[1]; cudaStreamCreate(&(streams[0])); cudaEvent_t start,stop; float elapsedTimeInMs = 0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /* cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice); cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice); cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice); */ dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1); dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1); cudaMemPrefetchAsync(A, sizeof(DATA_TYPE) * NX * NY, 0,streams[0]); atax_kernel1<<< grid1, block >>>(A,x,tmp); cudaThreadSynchronize(); atax_kernel2<<< grid2, block >>>(A,y,tmp); cudaThreadSynchronize(); //cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, 
cudaMemcpyDeviceToHost); cudaMemPrefetchAsync(y, sizeof(DATA_TYPE) * NX, cudaCpuDeviceId,streams[0]); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTimeInMs, start, stop); fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs); /* cudaFree(A_gpu); cudaFree(x_gpu); cudaFree(y_gpu); cudaFree(tmp_gpu); */ } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; /* A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE)); */ /* cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, cudaHostAllocPortable); cudaHostAlloc((void **)&x, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable); cudaHostAlloc((void **)&y, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable); cudaHostAlloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable); cudaHostAlloc((void **)&tmp, sizeof(DATA_TYPE) * NX, cudaHostAllocPortable); */ cudaMallocManaged((void **)&A, sizeof(DATA_TYPE) * NX * NY); cudaMallocManaged((void **)&x, sizeof(DATA_TYPE) * NY); cudaMallocManaged((void **)&y, sizeof(DATA_TYPE) * NY); cudaMallocManaged((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY); cudaMallocManaged((void **)&tmp, sizeof(DATA_TYPE) * NX); init_array(x, A); GPU_argv_init(); ataxGpu(A, x, y, tmp, y_outputFromGpu); /* t_start = rtclock(); atax_cpu(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); */ cudaFree(A); cudaFree(x); cudaFree(y); cudaFree(y_outputFromGpu); cudaFree(tmp); return 0; }
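// Sketch of the unified-memory pattern the atax pair above relies on (assumes a
// device that supports managed memory): allocate with cudaMallocManaged,
// prefetch to the GPU before launching and back to the CPU before reading, so
// page migration stays out of the timed region.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* v, int n, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main()
{
    const int n = 1 << 20;
    float* v = nullptr;
    cudaMallocManaged(&v, n * sizeof(float));
    for (int i = 0; i < n; i++) v[i] = 1.0f;

    cudaMemPrefetchAsync(v, n * sizeof(float), 0, 0);               // host -> device 0
    scale<<<(n + 255) / 256, 256>>>(v, n, 2.0f);
    cudaMemPrefetchAsync(v, n * sizeof(float), cudaCpuDeviceId, 0); // device -> host
    cudaDeviceSynchronize();

    printf("v[0] = %f\n", v[0]);  // expect 2.000000
    cudaFree(v);
    return 0;
}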
9dc081bc1762c8d265743cb82f79cb19cdd28602.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #include <sys/resource.h> // Tipo de los datos del algoritmo typedef int data_t; // Prototipos data_t add(const data_t a, const data_t b) { return a + b; } data_t sub(const data_t a, const data_t b) { return a - b; } void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t)); void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes); void print_matrix(data_t * const M, const unsigned int size); double tick(); __global__ void kernel_op_1(data_t * A, data_t * B); __global__ void kernel_op_2(data_t * M, const unsigned int size); // Host function int main(int argc, char** argv) { const unsigned int N = (argc == 2) ? atoi(argv[1]) : 0; double t, resultado; if (!N){ printf("Parametros incorrectos. El programa se cierra\n"); return -1; } // Mostrar tipo de elemento printf("Tamao del elemento a procesar: %d bytes\n", sizeof(data_t)); // En la CPU... // ...Aloca matrices t = tick(); const unsigned int n_bytes = sizeof(data_t)*N*N; data_t *host_A = (data_t*) malloc(n_bytes); data_t *host_B = (data_t*) malloc(n_bytes); t = tick() - t; printf("Alocar matrices en mem. de CPU: %f\n", t); // ...Inicializa matrices t = tick(); init_matrix(host_A, N, &add); init_matrix(host_B, N, &sub); t = tick() - t; printf("Inicializar matrices en mem. de CPU: %f\n", t); #ifdef DEBUG printf("Matriz A =====\n"); print_matrix(host_A, N); printf("Matriz B =====\n"); print_matrix(host_B, N); #endif run_GPU(host_A, host_B, n_bytes); // Verificacion de resultados #ifdef DEBUG printf("Resultado parcial =====\n"); print_matrix(host_A, N); #endif //Paso final: dividir la suma resultado = host_A[0]/((float)N*N); t = tick(); free(host_A); free(host_B); t = tick() - t; printf("Liberacion de mem. CPU: %f\n", t); printf("\x1B[36mResultado final =====>>> %f\x1B[0m\n", resultado); return 0; } void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes) { data_t *gpu_A, *gpu_B; const unsigned int size = n_bytes / sizeof(data_t); unsigned int i; double t; // Aloca memoria en GPU t = tick(); hipMalloc((void**)&gpu_A, n_bytes); hipMalloc((void**)&gpu_B, n_bytes); t = tick() - t; printf("Alocar matrices en mem. de GPU: %f\n", t); // Copia los datos desde el host a la GPU t = tick(); hipMemcpy(gpu_A, host_A, n_bytes, hipMemcpyHostToDevice); hipMemcpy(gpu_B, host_B, n_bytes, hipMemcpyHostToDevice); t = tick() - t; printf("Copia de datos desde mem. CPU hacia mem. GPU: %f\n", t); // Configura el tamao de los grids y los bloques dim3 dimGrid(1); dim3 dimBlock(16); // Invoca al kernel t = tick(); hipLaunchKernelGGL(( kernel_op_1), dim3(dimGrid), dim3(dimBlock) , 0, 0, gpu_A, gpu_B); hipDeviceSynchronize(); for (i=1; i<size; i*=2) { hipLaunchKernelGGL(( kernel_op_2), dim3(dimGrid), dim3(dimBlock) , 0, 0, gpu_A, i); hipDeviceSynchronize(); } t = tick() - t; printf("\x1B[33mEjecucion del kernel de GPU: %f\x1B[0m\n", t); // Recupera los resultados, guardandolos en el host t = tick(); hipMemcpy(host_A, gpu_A, n_bytes, hipMemcpyDeviceToHost); hipMemcpy(host_B, gpu_B, n_bytes, hipMemcpyDeviceToHost); t = tick() - t; printf("Copia de datos desde mem. GPU hacia mem. CPU: %f\n", t); // Libera la memoria alocada en la GPU t = tick(); hipFree(gpu_A); hipFree(gpu_B); t = tick() - t; printf("Liberar mem. 
de GPU: %f\n", t); } // Los kernels que ejecutaran por cada hilo de la GPU __global__ void kernel_op_1(data_t *A, data_t *B) { unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x; A[global_id] = (A[global_id] - B[global_id]) * (A[global_id] - B[global_id]); } __global__ void kernel_op_2(data_t *M, const unsigned int offset) { unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x; M[global_id] += M[global_id + offset]; } // Funcion para la inicializacion de las matrices void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t)) { unsigned int i,j; for (i=0; i<size; i++) { for (j=0; j<size; j++) { M[i*size + j] = (*init_op)(i,j); } } } // Impresion de matriz void print_matrix(data_t * const M, const unsigned int size) { int i,j; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) printf("%8d ", M[i*size+j]); printf("\n"); } } // Para medir los tiempos double tick(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; }
9dc081bc1762c8d265743cb82f79cb19cdd28602.cu
#include <cuda.h> #include <stdio.h> #include <sys/time.h> #include <sys/resource.h> // Tipo de los datos del algoritmo typedef int data_t; // Prototipos data_t add(const data_t a, const data_t b) { return a + b; } data_t sub(const data_t a, const data_t b) { return a - b; } void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t)); void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes); void print_matrix(data_t * const M, const unsigned int size); double tick(); __global__ void kernel_op_1(data_t * A, data_t * B); __global__ void kernel_op_2(data_t * M, const unsigned int size); // Host function int main(int argc, char** argv) { const unsigned int N = (argc == 2) ? atoi(argv[1]) : 0; double t, resultado; if (!N){ printf("Parametros incorrectos. El programa se cierra\n"); return -1; } // Mostrar tipo de elemento printf("Tamaño del elemento a procesar: %d bytes\n", sizeof(data_t)); // En la CPU... // ...Aloca matrices t = tick(); const unsigned int n_bytes = sizeof(data_t)*N*N; data_t *host_A = (data_t*) malloc(n_bytes); data_t *host_B = (data_t*) malloc(n_bytes); t = tick() - t; printf("Alocar matrices en mem. de CPU: %f\n", t); // ...Inicializa matrices t = tick(); init_matrix(host_A, N, &add); init_matrix(host_B, N, &sub); t = tick() - t; printf("Inicializar matrices en mem. de CPU: %f\n", t); #ifdef DEBUG printf("Matriz A =====\n"); print_matrix(host_A, N); printf("Matriz B =====\n"); print_matrix(host_B, N); #endif run_GPU(host_A, host_B, n_bytes); // Verificacion de resultados #ifdef DEBUG printf("Resultado parcial =====\n"); print_matrix(host_A, N); #endif //Paso final: dividir la suma resultado = host_A[0]/((float)N*N); t = tick(); free(host_A); free(host_B); t = tick() - t; printf("Liberacion de mem. CPU: %f\n", t); printf("\x1B[36mResultado final =====>>> %f\x1B[0m\n", resultado); return 0; } void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes) { data_t *gpu_A, *gpu_B; const unsigned int size = n_bytes / sizeof(data_t); unsigned int i; double t; // Aloca memoria en GPU t = tick(); cudaMalloc((void**)&gpu_A, n_bytes); cudaMalloc((void**)&gpu_B, n_bytes); t = tick() - t; printf("Alocar matrices en mem. de GPU: %f\n", t); // Copia los datos desde el host a la GPU t = tick(); cudaMemcpy(gpu_A, host_A, n_bytes, cudaMemcpyHostToDevice); cudaMemcpy(gpu_B, host_B, n_bytes, cudaMemcpyHostToDevice); t = tick() - t; printf("Copia de datos desde mem. CPU hacia mem. GPU: %f\n", t); // Configura el tamaño de los grids y los bloques dim3 dimGrid(1); dim3 dimBlock(16); // Invoca al kernel t = tick(); kernel_op_1<<< dimGrid, dimBlock >>>(gpu_A, gpu_B); cudaThreadSynchronize(); for (i=1; i<size; i*=2) { kernel_op_2<<< dimGrid, dimBlock >>>(gpu_A, i); cudaThreadSynchronize(); } t = tick() - t; printf("\x1B[33mEjecucion del kernel de GPU: %f\x1B[0m\n", t); // Recupera los resultados, guardandolos en el host t = tick(); cudaMemcpy(host_A, gpu_A, n_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(host_B, gpu_B, n_bytes, cudaMemcpyDeviceToHost); t = tick() - t; printf("Copia de datos desde mem. GPU hacia mem. CPU: %f\n", t); // Libera la memoria alocada en la GPU t = tick(); cudaFree(gpu_A); cudaFree(gpu_B); t = tick() - t; printf("Liberar mem. 
de GPU: %f\n", t); } // Los kernels que ejecutaran por cada hilo de la GPU __global__ void kernel_op_1(data_t *A, data_t *B) { unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x; A[global_id] = (A[global_id] - B[global_id]) * (A[global_id] - B[global_id]); } __global__ void kernel_op_2(data_t *M, const unsigned int offset) { unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x; M[global_id] += M[global_id + offset]; } // Funcion para la inicializacion de las matrices void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t)) { unsigned int i,j; for (i=0; i<size; i++) { for (j=0; j<size; j++) { M[i*size + j] = (*init_op)(i,j); } } } // Impresion de matriz void print_matrix(data_t * const M, const unsigned int size) { int i,j; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) printf("%8d ", M[i*size+j]); printf("\n"); } } // Para medir los tiempos double tick(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; }
fb8d2bdf3d69ecd6bda8902073c241a6f2d5c67f.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define MSIZE 12*8*21 //22 #define BLOCK_SIZE 256 #define WARP_SIZE 32 surface<void,1> surf_vec; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = __ldg(&rowDelimiters[myRow]); int warpEnd = __ldg(&rowDelimiters[myRow+1]); float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { float temp; int col = __ldg(&cols[j]); surf1Dread(&temp,surf_vec,col*4,hipBoundaryModeTrap); mySum += __ldg(&val[j]) * temp;//vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(1); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * (spmv_numRows / 10); // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); //hipChannelFormatDesc channelDescA = hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDescA=hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat); hipArray *A_vec; 
hipMallocArray(&A_vec,&channelDescA,spmv_numRows,1,hipArraySurfaceLoadStore); hipMemcpyToArray(A_vec,0,0,h_spmv_vec,spmv_numRows*sizeof(float),hipMemcpyHostToDevice); hipBindSurfaceToArray(surf_vec,A_vec); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE)); for (int i=0; i<10; i++) // repeat 10 times hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
fb8d2bdf3d69ecd6bda8902073c241a6f2d5c67f.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define MSIZE 12*8*21 //22 #define BLOCK_SIZE 256 #define WARP_SIZE 32 surface<void,1> surf_vec; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = __ldg(&rowDelimiters[myRow]); int warpEnd = __ldg(&rowDelimiters[myRow+1]); float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { float temp; int col = __ldg(&cols[j]); surf1Dread(&temp,surf_vec,col*4,cudaBoundaryModeTrap); mySum += __ldg(&val[j]) * temp;//vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(1); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * (spmv_numRows / 10); // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); //cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDescA=cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat); cudaArray *A_vec; 
cudaMallocArray(&A_vec,&channelDescA,spmv_numRows,1,cudaArraySurfaceLoadStore); cudaMemcpyToArray(A_vec,0,0,h_spmv_vec,spmv_numRows*sizeof(float),cudaMemcpyHostToDevice); cudaBindSurfaceToArray(surf_vec,A_vec); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE)); for (int i=0; i<10; i++) // repeat 10 times spmv_kernel <<<spmv_grid, BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
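// Sketch of an alternative to the per-row reduction in spmv_kernel above: the
// original relies on a volatile shared array and implicit warp-synchronous
// execution, which is no longer guaranteed on Volta and later. The same
// row-per-warp sum can be written with explicit warp shuffles (CUDA 9+); this
// helper ignores the surface-memory path and reads the dense vector directly.
__device__ float warp_row_sum(const float* val, const int* cols,
                              const float* vec, int rowStart, int rowEnd,
                              int lane)
{
    float sum = 0.0f;
    for (int j = rowStart + lane; j < rowEnd; j += 32)
        sum += val[j] * vec[cols[j]];              // one warp strides over the row
    for (int offset = 16; offset > 0; offset >>= 1)
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    return sum;                                    // lane 0 holds the row total
}
// A caller would write out[myRow] from lane 0 only, as spmv_kernel does above.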
6720214781e58b106e41bb6915dc9feae063792c.hip
// !!! This is a file automatically generated by hipify!!! #include "stdafx.h" #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/device_functions.h" #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cuda_surface_types.h> #include "device_launch_parameters.h" //device_launch_parameters.h" //#include <comutil.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "rocblas.h" #if defined(_WIN32) #include <comutil.h> using namespace _com_util; #pragma comment(lib, "cudart") #pragma comment(lib,"cublas.lib") #endif using namespace std; //using namespace _com_util; __global__ void cuda_matrix_ada_grad_decent(float * gpu_floats_a, float * gpu_floats_b, float * adaG, uint32_t m, uint32_t n, float lr, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { int updateIdx = idy*n + idx; float gradval = gpu_floats_b[updateIdx]; float adaval = adaG[updateIdx] + gradval * gradval; adaG[updateIdx] = adaval; gpu_floats_a[updateIdx] = gpu_floats_a[updateIdx] - (lr*gradval/(sqrtf(adaval)+eps)); } } void cuda_Matrix_Ada_Grad_Decent(float * gpu_floats_a, float * gpu_floats_b, float * adaG, uint32_t m, uint32_t n, float lr, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_ada_grad_decent), dim3(block_tail), dim3(thread_tail), 0, 0, gpu_floats_a, gpu_floats_b, adaG, m, n, lr, eps); } __global__ void cuda_matrix_grad_decent(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { gpu_floats_a[idy*n + idx] = gpu_floats_a[idy*n + idx] - gpu_floats_b[idy*n + idx] * lr; } } void cuda_Matrix_Grad_Decent(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float lr) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_grad_decent), dim3(block_tail), dim3(thread_tail), 0, 0, gpu_floats_a, gpu_floats_b, m, n, lr); } __global__ void cuda_matrix_add(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float mweight) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m) { gpu_floats_a[idy*n+idx] = gpu_floats_a[idy*n+idx] + gpu_floats_b[idy*n+idx] * mweight; } } void cuda_Matrix_Add(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float mweight) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + 
DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_add), dim3(block_tail) ,dim3(thread_tail), 0, 0, gpu_floats_a, gpu_floats_b, m, n,mweight); } __global__ void cuda_matrix_add_real(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { gpu_floats_a[idy*n + idx] = gpu_floats_a[idy*n + idx] - gpu_floats_b[idy*n + idx]; } } void cuda_Matrix_Add_REAL(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_add_real), dim3(block_tail), dim3(thread_tail), 0, 0, gpu_floats_a, gpu_floats_b, m, n); } __global__ void cuda_scale_matrix(float * gpu_floats_a, uint32_t m, uint32_t n, float mweight) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m) { gpu_floats_a[idy * n + idx] = gpu_floats_a[idy * n + idx] * mweight; //(float)log( (float)gpu_floats_a[idx]); } } void cuda_Scale_Matrix(float * gpu_floats_a, uint32_t m, uint32_t n, float mweight) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_scale_matrix), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a, m, n, mweight); } __global__ void cuda_matrix_add_tanh(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m ) { uint32_t col = idx ; //% n; float t = gpu_floats_a[idy * n + idx] + gpu_floats_b[col]; gpu_floats_a[idy * n + idx] = tanhf(t); } } void cuda_Matrix_Add_Tanh(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_add_tanh), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a,gpu_floats_b, m, n); } __global__ void cuda_matrix_add_vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize ) { gpu_floats_a[idy * dimension + idx] = gpu_floats_a[idy * dimension + idx] + gpu_floats_b[idx]; } } void cuda_Matrix_Add_Vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_add_vector), dim3(block_tail) 
,dim3(thread_tail) , 0, 0, gpu_floats_a,gpu_floats_b, batchsize, dimension); } __global__ void cuda_matrix_rectified_vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize ) { gpu_floats_a[idy * dimension + idx] = gpu_floats_a[idy * dimension + idx] + gpu_floats_b[idx]; if(gpu_floats_a[idy * dimension + idx] < 0) { gpu_floats_a[idy * dimension + idx] = 0; } } } void cuda_Matrix_Rectified_Vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_rectified_vector), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a,gpu_floats_b, batchsize, dimension); } __global__ void cuda_deriv_cosine(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (1 - q[idx * m + i]) * ( 1 + q[idx * m + i]) * (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (1 - d[idx * m + i]) * ( 1 + d[idx * m + i]) * (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine( float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK), 0, 0, q,d,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_dis(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3*batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else { //s3 tem1 = s3[pos*m + idx]; tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } } void cuda_Deriv_Dis(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, 
float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize*3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_dis), dim3(block_tail), dim3(thread_tail), 0, 0, s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin); } __global__ void cuda_deriv_dis_linear(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3 * batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else { //s3 tem1 = s3[pos*m + idx]; tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } } void cuda_Deriv_Dis_Linear(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize * 3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_dis_linear), dim3(block_tail), dim3(thread_tail), 0, 0, s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin); } __global__ void cuda_deriv_dis_rectified(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3 * batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; //check if there is error if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; if (fabsf(tem1) < eps) { s1deriv[pos*m + idx] = 0; } else { tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; if (fabsf(tem1) < eps) { s2deriv[pos*m + idx] = 0; } else { tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } else { //s3 tem1 = s3[pos*m + idx]; if (fabsf(tem1) < eps) { s3deriv[pos*m + idx] = 0; } else { tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = 
tem2 * 1.0f / batchsize; } } } } void cuda_Deriv_Dis_Rectified(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize * 3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_dis_rectified << <block_tail, thread_tail >> >(s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin, eps); } __global__ void cuda_calc_euclideandis(float * s1, float * s2, float * s3, float * res, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 2*batchsize) { int row = idx / batchsize; // first row(0): distance between s1 and s2; second row(1): distance between s1 and s3 int col = idx % batchsize; float * s = row > 0 ? s3 : s2; float tem; float dist = eps; for (uint32_t i = 0; i<m; i++) { tem = s1[col * m + i] - s[col * m + i]; dist += tem*tem; } dist = sqrtf(dist); res[2 * col + row] = dist; } } void cuda_Calc_EuclideanDis(float * s1, float * s2, float * s3, float * res, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (2 * batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_calc_euclideandis), dim3(nBlockPerGrid), dim3(DEFAULT_THREAD_PER_BLOCK), 0, 0, s1, s2, s3, res, batchsize, m, eps); } __global__ void cuda_deriv_cosine_linear(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Derive_Cosine_Linear(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine_linear), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q,d,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_rectified(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(fabsf(q[idx * m + i]) < eps) { dcq[idx * m + i] = 0; } else { dcq[idx * m + i] = (float)( (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; if(fabsf(d[idx * m + i]) < eps) { dcd[idx * m + i ] =0; } else { dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); } 
dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Derive_Cosine_Rectified(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine_rectified), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q,d,dcq,dcd,batchsize,m,eps); } //optimized version -- hxd __global__ void cuda_deriv_cosine_ex(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = 0; float c = 0; float bc, a_bbbc, a_bccc, batchsizenorm; float * q_iter = q + idx*m; float * d_iter = d + neg_list[idx]*m; float * q_iter_end = q_iter + m; while(q_iter < q_iter_end) { b += (*q_iter) * (*q_iter); c += (*d_iter) * (*d_iter); a += (*q_iter++) * (*d_iter++); } b = sqrtf(b); c = sqrtf(c); bc = b*c + eps; a_bbbc = a/(b*b*b*c + eps); a_bccc = a/(b*c*c*c + eps); batchsizenorm = 1.0f / batchsize; q_iter = q + idx*m; d_iter = d + neg_list[idx]*m; q_iter_end = q_iter + m; float * dcq_iter = dcq + idx*m; float * dcd_iter = dcd + idx*m; while(q_iter < q_iter_end) { *dcq_iter++ = (1.0f - *q_iter) * ( 1.0f + *q_iter) * (*d_iter / bc - *q_iter * a_bbbc) * batchsizenorm; *dcd_iter++ = (1.0f - *d_iter) * ( 1.0f + *d_iter) * (*q_iter / bc - *d_iter * a_bccc) * batchsizenorm; ++q_iter; ++d_iter; } } } void cuda_Deriv_Cosine_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine_ex), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_linear_ex(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Linear_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine_linear_ex), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_rectified_ex(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; 
float c = eps; uint32_t mIndex = neg_list[idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(q[idx*m+i] == 0) { dcq[idx * m + i] = 0; } else { dcq[idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; if(d[mIndex*m+i] == 0) { dcd[idx * m + i] = 0; } else { dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); } dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Rectified_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_deriv_cosine_rectified_ex), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_tanh(float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m && idy < batchsize ) { delta[idy * m + idx] = delta[idy * m +idx] * (1 - layer_output[idy * m + idx]) * ( 1 + layer_output[idy * m + idx]); } } void cuda_Deriv_Tanh( float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_tanh), dim3(block_tail) ,dim3(thread_tail) , 0, 0, delta, layer_output, batchsize, m); } __global__ void cuda_deriv_rectified(float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m && idy < batchsize ) { if(layer_output[idy * m + idx] == 0) { delta[idy * m + idx] = 0; // delta[idy * m +idx] ; } } } void cuda_Deriv_Rectified( float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_rectified), dim3(block_tail) ,dim3(thread_tail) , 0, 0, delta, layer_output, batchsize, m); } //optimized version -- hxd __global__ void cuda_matrix_multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idx < n && idy < batchsize) { //uint32_t row = idy; // / n; //uint32_t col = idx; // % n; float sum = 0; if(inverse == 1) { float * d_iter = delta + (idy * m); float * w_iter = weight + (idx * m); float * d_end_pt = d_iter + m; while(d_iter < 
d_end_pt) { sum += (*d_iter++) * (*w_iter++); } } else { float * d_iter = delta + (idy * m); float * w_iter = weight + idx; float * d_end_pt = d_iter + m; while(d_iter < d_end_pt) { sum += (*d_iter++) * (*w_iter); w_iter += n; } } delta_low[idy * n + idx] = sum; } } void cuda_Matrix_Multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_multipy), dim3(block_tail) ,dim3(thread_tail) , 0, 0, delta, weight, delta_low, batchsize, m, n, inverse); } __global__ void cuda_cosine_similarity(float * a, float * b, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxx = 0; float sumyy = 0; float sumxy = 0; for(uint32_t i=0;i<dimension;i++) { sumxx += a[idx * dimension + i] * a[idx * dimension + i]; sumyy += b[idx * dimension + i] * b[idx * dimension + i]; sumxy += a[idx * dimension + i] * b[idx * dimension + i]; } c[mindex * BATCHSIZE + idx] = (float)( sumxy * 1.0f / (sqrtf( (float)(sumxx * sumyy)) + eps) ); } } void cuda_Cosine_Similarity(float * a, float * b, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cosine_similarity), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, a,b,c,nTrial,BATCHSIZE,mindex,batchsize, dimension, eps); } __global__ void cuda_innerproduct_similarity(float * a, float * b, float * c, uint32_t batchsize, uint32_t dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxy = 0; for(uint32_t i=0;i<dimension;i++) { sumxy += a[idx * dimension + i] * b[idx * dimension + i]; } c[idx] = (float)(sumxy * 1.0f); } } void cuda_InnerProduct_Similarity(float * a, float * b, float * c, uint32_t batchsize, uint32_t dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_innerproduct_similarity), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, a, b, c, batchsize, dimension); } //optimized version -- hxd __global__ void cuda_cosine_similarity_ex(float * a, float * b,uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxx = 0; float sumyy = 0; float sumxy = 0; float * a_iter = a + (idx * dimension); float * b_iter = b + (neg_list[idx] * dimension); float * a_iter_end = a_iter + dimension; while(a_iter < a_iter_end) { sumxx += (*a_iter) * (*a_iter); sumyy += (*b_iter) * (*b_iter); sumxy += (*a_iter++) * (*b_iter++); } c[mindex * BATCHSIZE + idx] = (float)( sumxy / ((float)sqrtf(sumxx * sumyy) + eps) ); } } void cuda_Cosine_Similarity_EX(float * a, float * b, uint32_t * neg_list, float * 
c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cosine_similarity_ex), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, a,b,neg_list,c,nTrial,BATCHSIZE,mindex,batchsize, dimension, eps); } __global__ void cuda_cal_alpha(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } __global__ void cuda_cal_alpha_sum(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma, uint32_t init) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sum = init; for(uint32_t i=1;i<nTrial;i++) { sum += alpha[i * BATCHSIZE + idx]; } alpha[idx] = sum; } } __global__ void cuda_cal_alpha_norm(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = (float)((gamma * alpha[row * BATCHSIZE + col + BATCHSIZE])/ alpha[col]); //expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } void cuda_Calculate_Alpha(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid_1 = ((nTrial-1)*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha), dim3(nBlockPerGrid_1) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma); uint32_t nBlockPerGrid_2 = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha_sum), dim3(nBlockPerGrid_2) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma, 1); hipLaunchKernelGGL(( cuda_cal_alpha_norm), dim3(nBlockPerGrid_1) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma); hipLaunchKernelGGL(( cuda_cal_alpha_sum), dim3(nBlockPerGrid_2) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma, 0); } __global__ void cuda_cal_alpha_norm_MXE(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = (float)((gamma * alpha[row * BATCHSIZE + col + BATCHSIZE])/ alpha[col]/ alpha[col]); //expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } void cuda_Calculate_Alpha_MXE(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid_1 = ((nTrial-1)*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha), dim3(nBlockPerGrid_1) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma); uint32_t nBlockPerGrid_2 = (batchsize 
+ DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha_sum), dim3(nBlockPerGrid_2) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma, 1); hipLaunchKernelGGL(( cuda_cal_alpha_norm_MXE), dim3(nBlockPerGrid_1) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma); hipLaunchKernelGGL(( cuda_cal_alpha_sum), dim3(nBlockPerGrid_2) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial,BATCHSIZE,batchsize,gamma, 0); } __global__ void cuda_cal_alpha_PAIRRANK(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float msum = 0; for(int n = 1; n < nTrial; n++) { float a = gamma * (1.0f - 1.0f / (1 + expf(- gamma * (alpha[idx] - alpha[n * BATCHSIZE + idx] )))); alpha[n * BATCHSIZE + idx] = a; msum += a; } alpha[idx] = msum; } } void cuda_Calculate_Alpha_PAIRRANK(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha_PAIRRANK), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, nTrial, BATCHSIZE, batchsize, gamma); } __global__ void cuda_cal_alpha_nce(float * alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { alpha[idx] = gamma - gamma / (1.0f + (nTrial - 1) * expf(dist[idx] - gamma * alpha[idx] + gamma)); //+gamma is from hxd, sd doesn't have this } else if(idx < nTrial*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col] = gamma / (1.0f + (nTrial - 1) * expf(dist[row * BATCHSIZE + col] - gamma * alpha[row * BATCHSIZE + col] + gamma)); //+gamma is from hxd, sd doesn't have this } } void cuda_Calculate_Alpha_NCE(float* alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (nTrial*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha_nce), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, dist, nTrial,BATCHSIZE,batchsize,gamma); } __global__ void cuda_cal_alpha_nce2(float * alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < nTrial*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; float s = 1.0f / (1.0f + (nTrial - 1) * expf(dist[row * BATCHSIZE + col] - gamma * alpha[row * BATCHSIZE + col] + gamma)); //+gamma is from hxd, sd doesn't have this alpha[row * BATCHSIZE + col] = gamma * s * (1.0f - s); } } void cuda_Calculate_Alpha_NCE2(float* alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (nTrial*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_cal_alpha_nce2), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, alpha, dist, nTrial,BATCHSIZE,batchsize,gamma); } __global__ void cuda_fillout_dist_nce(float* dist, uint32_t* neg_list, uint32_t nTrailPlus1, uint32_t BATCH_SIZE, uint32_t mindex, uint32_t batchsize) { uint32_t idx = 
blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { uint32_t mtindex = neg_list[idx]; dist[mindex * BATCH_SIZE + idx] = dist[mtindex]; } } void cuda_FillOut_Dist_NCE(float* dist, uint32_t* neg_list, uint32_t nTrailPlus1, uint32_t BATCH_SIZE, uint32_t mindex, uint32_t batchsize) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_fillout_dist_nce), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, dist, neg_list, nTrailPlus1, BATCH_SIZE, mindex, batchsize); } //optimized version -- hxd __global__ void cuda_matrix_product(float * a1, float * b1, float * a2, float * b2, float * a3, float * b3, float * c, uint32_t batchsize, uint32_t m, uint32_t n) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idx < n && idy < m ) { float sum = 0; for (uint32_t i = 0; i < batchsize; i++) { sum += a1[m*i + idy] * b1[n*i + idx]; sum += a2[m*i + idy] * b2[n*i + idx]; sum += a3[m*i + idy] * b3[n*i + idx]; } //uint32_t row = idy; // / n; //uint32_t col = idx;// % n; //float *a_iter = a+row; //float *b_iter = b+col; //float *a_end_pt = a_iter + (m*batchsize); //while(a_iter < a_end_pt) //{ // sum += (*a_iter) * (*b_iter); // a_iter += m; // b_iter += n; //} c[idy * n + idx] = sum; } } void cuda_Matrix_Product(float * a1, float * b1, float * a2, float * b2, float * a3, float * b3, float * c, uint32_t batchsize, uint32_t m, uint32_t n) //, uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_product), dim3(block_tail), dim3(thread_tail), 0, 0, a1, b1, a2, b2, a3, b3, c, batchsize, m, n); //, kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_product_sup(float * a, float * b, float * c, uint32_t batchsize, uint32_t m, uint32_t n) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < n && idy < m) { float sum = 0; for (uint32_t i = 0; i < batchsize; i++) { sum += a[m*i + idy] * b[n*i + idx]; } //uint32_t row = idy; // / n; //uint32_t col = idx;// % n; //float *a_iter = a+row; //float *b_iter = b+col; //float *a_end_pt = a_iter + (m*batchsize); //while(a_iter < a_end_pt) //{ // sum += (*a_iter) * (*b_iter); // a_iter += m; // b_iter += n; //} c[idy * n + idx] = sum; } } void cuda_Matrix_Product_Sup(float * a, float * b, float * c, uint32_t batchsize, uint32_t m, uint32_t n) //, uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_product_sup), dim3(block_tail), dim3(thread_tail), 0, 0, a, b, c, batchsize, m, n); //, kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void 
cuda_convolution_matrix_product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < weightDim) { float sum = 0; int target_word1, target_word2, target_word3, widx1, widx2, widx3, wordpos, offset, precompIdx; wordpos = idy / Feature_Dimension; offset = idy % Feature_Dimension; for(int b=0;b<batchsize;b++) { precompIdx = b * output_dimension + idx; target_word1 = maxpooling_index1[precompIdx]; target_word2 = maxpooling_index2[precompIdx]; target_word3 = maxpooling_index3[precompIdx]; int widx1 = Word_Index1[target_word1 + wordpos]; int widx2 = Word_Index2[target_word2 + wordpos]; int widx3 = Word_Index3[target_word3 + wordpos]; sum += deriv1[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; sum += deriv2[precompIdx] * wordLT[Feature_Dimension * widx2 + offset]; sum += deriv3[precompIdx] * wordLT[Feature_Dimension * widx3 + offset]; } grad[idy * output_dimension + idx] = sum; } } void cuda_Convolution_Matrix_Product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * win_size; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_convolution_matrix_product_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, deriv1, maxpooling_index1, deriv2, maxpooling_index2, deriv3, maxpooling_index3, wordLT, Word_Index1, Word_Index2, Word_Index3, win_size, batchsize, output_dimension, grad, Feature_Dimension, weightDim); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_convolution_matrix_product_sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < weightDim) { float sum = 0; int target_word1, widx1, wordpos, offset, precompIdx; wordpos = idy / Feature_Dimension; offset = idy % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idx; target_word1 = maxpooling_index[precompIdx]; int widx1 = Word_Index[target_word1 + wordpos]; sum += deriv[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; } grad[idy * output_dimension + idx] = sum; } } void cuda_Convolution_Matrix_Product_Sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension) //,float * alpha, uint32_t ntrial, uint32_t 
BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * win_size; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_convolution_matrix_product_sup), dim3(block_tail), dim3(thread_tail), 0, 0, deriv, maxpooling_index, wordLT, Word_Index, win_size, batchsize, output_dimension, grad, Feature_Dimension, weightDim); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_multiconv_matrix_product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim, int currOuputDim, int pastOutdim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < weightDim && idy < currOuputDim) { float sum = 0; int target_word1, target_word2, target_word3, widx1, widx2, widx3, wordpos, offset, precompIdx; wordpos = idx / Feature_Dimension; offset = idx % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idy + pastOutdim; target_word1 = maxpooling_index1[precompIdx]; target_word2 = maxpooling_index2[precompIdx]; target_word3 = maxpooling_index3[precompIdx]; int widx1 = Word_Index1[target_word1 + wordpos]; int widx2 = Word_Index2[target_word2 + wordpos]; int widx3 = Word_Index3[target_word3 + wordpos]; sum += deriv1[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; sum += deriv2[precompIdx] * wordLT[Feature_Dimension * widx2 + offset]; sum += deriv3[precompIdx] * wordLT[Feature_Dimension * widx3 + offset]; } grad[idy * weightDim + idx] = sum; } } void cuda_MultiConv_Matrix_Product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize, int fmsize, int accu, int accu_para) // the last two pointers are on host //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * winsize; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (fmsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_multiconv_matrix_product_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, deriv1, maxpooling_index1, deriv2, maxpooling_index2, deriv3, maxpooling_index3, wordLT, Word_Index1, Word_Index2, Word_Index3, winsize, batchsize, output_dimension, (grad + accu_para), Feature_Dimension, weightDim, fmsize, accu); } __global__ void cuda_multiconv_matrix_product_sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim, int currOuputDim, int pastOutdim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < weightDim && idy < 
currOuputDim) { float sum = 0; int target_word1, widx1, wordpos, offset, precompIdx; wordpos = idx / Feature_Dimension; offset = idx % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idy + pastOutdim; target_word1 = maxpooling_index[precompIdx]; int widx1 = Word_Index[target_word1 + wordpos]; sum += deriv[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; } grad[idy * weightDim + idx] = sum; } } void cuda_MultiConv_Matrix_Product_Sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize, int fmsize, int accu, int accu_para) // the last two pointers are on host //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * winsize; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (fmsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_multiconv_matrix_product_sup), dim3(block_tail), dim3(thread_tail), 0, 0, deriv, maxpooling_index, wordLT, Word_Index, winsize, batchsize, output_dimension, (grad + accu_para), Feature_Dimension, weightDim, fmsize, accu); } __global__ void cuda_multiconv_compute_wvderiv(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int * winsizes, int * fmsizes) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < Feature_Dimension && idy < batchsize) { int currFilterset = 0, counter = 0, accuoffset = 0, currweightDim; float cacheDeriv; int wordIdx, i; for (int b = 0; b < output_dimension; b++) { if (counter >= fmsizes[currFilterset]) { counter = 0; accuoffset += Feature_Dimension * winsizes[currFilterset] * fmsizes[currFilterset]; currFilterset++; } currweightDim = Feature_Dimension * winsizes[currFilterset]; cacheDeriv = deriv[idy*output_dimension + b]; wordIdx = maxpooling_index[idy*output_dimension + b]; for (i = 0; i < winsizes[currFilterset]; i++) { grad[(wordIdx + i)*Feature_Dimension + idx] += cacheDeriv * weight[accuoffset + counter*currweightDim + (i*Feature_Dimension + idx)]; } counter++; } } } void cuda_MultiConv_Compute_WVDERIV(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int * winsizes, int * fmsizes) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_multiconv_compute_wvderiv), dim3(block_tail), dim3(thread_tail), 0, 0, deriv, maxpooling_index, weight, batchsize, output_dimension, grad, Feature_Dimension, winsizes, fmsizes); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_conv_compute_wvderiv(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < 
Feature_Dimension && idy < batchsize) { float cacheDeriv; int wordIdx, i; for (int b = 0; b < output_dimension; b++) { cacheDeriv = deriv[idy*output_dimension + b]; wordIdx = maxpooling_index[idy*output_dimension + b]; for (i = 0; i < winsize; i++) { grad[(wordIdx + i)*Feature_Dimension + idx] += cacheDeriv * weight[(i*Feature_Dimension + idx)*output_dimension + b]; } } } } void cuda_Conv_Compute_WVDERIV(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_conv_compute_wvderiv), dim3(block_tail), dim3(thread_tail), 0, 0, deriv, maxpooling_index, weight, batchsize, output_dimension, grad, Feature_Dimension, winsize); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_convolution_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < Word_SeqLen) { uint32_t mSmp_idx = Word_Margin[idy]; uint32_t wordEnd = Smp_Index[mSmp_idx]; uint32_t wordBegin = 0; if (mSmp_idx > 0) wordBegin = Smp_Index[mSmp_idx - 1]; if (idy >= wordBegin && idy <= (wordEnd - win_size)) { output[idy * output_dimension + idx] = 0; float sum = 0; for (int w = 0; w < win_size; w++) { uint32_t wordIdx = Word_Index[idy + w]; // get its vector from word lookup table for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * con_weight[(w * Feature_dimension + i)*output_dimension + idx]; } } output[idy * output_dimension + idx] = sum; } } } void cuda_Convolution_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_convolution_matrix_multiply_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, Word_Index, Word_Margin, Word_SeqLen, wordLT, con_weight, output, Feature_dimension, output_dimension, win_size); } __global__ void cuda_multiconv_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t * win_sizes, uint32_t * fm_sizes) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < Word_SeqLen) { int filterClass = 0; uint32_t 
idx_offset = idx; uint32_t weightOffset = 0; while (idx_offset >= fm_sizes[filterClass]) { weightOffset += Feature_dimension * win_sizes[filterClass] * fm_sizes[filterClass]; idx_offset = idx_offset - fm_sizes[filterClass]; filterClass++; } uint32_t win_size = win_sizes[filterClass]; uint32_t mSmp_idx = Word_Margin[idy]; uint32_t wordEnd = Smp_Index[mSmp_idx]; uint32_t wordBegin = 0; if (mSmp_idx > 0) wordBegin = Smp_Index[mSmp_idx - 1]; if (idy >= wordBegin && idy <= (wordEnd - win_size)) { output[idy * output_dimension + idx] = 0; float sum = 0; uint32_t woffset = weightOffset + idx_offset * (win_size * Feature_dimension); for (int w = 0; w < win_size; w++) { uint32_t wordIdx = Word_Index[idy + w]; // get its vector from word lookup table for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * con_weight[woffset + w * Feature_dimension + i]; } } output[idy * output_dimension + idx] = sum; } } } void cuda_MultiConv_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t * win_sizes, uint32_t * fm_sizes) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_multiconv_matrix_multiply_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, Word_Index, Word_Margin, Word_SeqLen, wordLT, con_weight, output, Feature_dimension, output_dimension, win_sizes, fm_sizes); } __global__ void cuda_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idy < batchsize && idx < output_dimension) { //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - win_size; uint32_t col_begin = 0; if(idy > 0) { col_begin = Smp_Index[idy-1]; } float max_value = 0; int max_index = -1; for(uint32_t i=col_begin;i<=col_end; i++) { if(max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value ) { max_value = pooling_feas[i * output_dimension + idx]; max_index = i; } } output[idy * output_dimension + idx] = max_value; maxpooling_index[idy * output_dimension + idx] = max_index; } } void cuda_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output,int * maxpooling_index, int output_dimension, int win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_max_pooling), dim3(block_tail), dim3(thread_tail), 0, 0, pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension, win_size); } __global__ void cuda_multi_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int * win_sizes, int * fm_sizes) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + 
threadIdx.y; if (idy < batchsize && idx < output_dimension) { int filterClass = 0; uint32_t idx_offset = idx; while (idx_offset >= fm_sizes[filterClass]) { idx_offset = idx_offset - fm_sizes[filterClass]; filterClass++; } uint32_t win_size = win_sizes[filterClass]; //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - win_size; uint32_t col_begin = 0; if (idy > 0) { col_begin = Smp_Index[idy - 1]; } float max_value = 0; int max_index = -1; for (uint32_t i = col_begin; i <= col_end; i++) { if (max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value) { max_value = pooling_feas[i * output_dimension + idx]; max_index = i; } } output[idy * output_dimension + idx] = max_value; maxpooling_index[idy * output_dimension + idx] = max_index; } } void cuda_Multi_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int * win_sizes, int * fm_sizes) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_multi_max_pooling), dim3(block_tail), dim3(thread_tail), 0, 0, pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension, win_sizes, fm_sizes); } __global__ void cuda_lstm_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idy < batchsize && idx < output_dimension) { //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - 1; uint32_t col_begin = 0; if (idy > 0) { col_begin = Smp_Index[idy - 1]; } float max_value = 0; int max_index = -1; for (uint32_t i = col_begin; i <= col_end; i++) { if (max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value) { max_value = pooling_feas[i * output_dimension + idx]; max_index = i; } } output[idy * output_dimension + idx] = max_value; maxpooling_index[idy * output_dimension + idx] = max_index; } } void cuda_LSTM_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_lstm_max_pooling), dim3(block_tail), dim3(thread_tail), 0, 0, pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension); } __global__ void cuda_seq_sparse_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idx < output_dimension && idy < batchsize) { uint32_t seg_end = Smp_Index[idy]; uint32_t seg_begin = 0; if(idy > 0) { seg_begin = Smp_Index[idy-1]; } float sum = 0; for(uint32_t word_idx = seg_begin; word_idx < seg_end; ++word_idx) { uint32_t col_end = Seg_Index[word_idx]; uint32_t col_begin = 0; if(word_idx > 0) { col_begin = 
Seg_Index[word_idx - 1]; } for(uint32_t i=col_begin;i<col_end; ++i) { uint32_t fea_idx = Fea_Index[i]; sum += Fea_Value[i] * mul_weight[((word_idx - seg_begin) * Feature_dimension + fea_idx) * output_dimension + idx]; } } output[idy * output_dimension + idx] = sum; } }
/* Added by xinson, 2/17/2014 This version still computes a sparse matrix (batch * input) multiplied by a dense matrix. However, each row of the sparse matrix is more than just BOW; it is a sequence of BOW. To put it another way, the sparse matrix has exactly the same structure as what is used in Convolutional_Sparse_Matrix_Multiply_INTEX. As a result, the dense matrix (mul_weight) is of size (Feature_dimension * win_size) * output_dimension, where win_size is how many words per input sequence instance. Note that all inputs should have exactly the same number of words. One word is represented as an instance of BOW. */
void cuda_SEQ_Sparse_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
//uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
hipLaunchKernelGGL(( cuda_seq_sparse_matrix_multiply_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, Seg_Index, Seg_Margin, Seg_Len, seg_size, Fea_Index, Fea_Value, elementsize, mul_weight, output, Feature_dimension, output_dimension,win_size); }
__global__ void cuda_seq_sparse_matrix_transpose_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < output_dimension) { uint32_t seg_begin = 0; for(uint32_t sample = 0; sample < batchsize; ++sample) { uint32_t seg_end = Smp_Index[sample]; float sum = 0; for(uint32_t word_idx = seg_begin; word_idx < seg_end; ++word_idx) { uint32_t col_end = Seg_Index[word_idx]; uint32_t col_begin = 0; if(word_idx > 0) { col_begin = Seg_Index[word_idx - 1]; } for(uint32_t i=col_begin;i<col_end; ++i) { uint32_t fea_idx = Fea_Index[i]; mul_weight[((word_idx - seg_begin) * Feature_dimension + fea_idx) * output_dimension + idx] += Fea_Value[i] * output[sample * output_dimension + idx]; } } seg_begin = seg_end; } } }
/* Added by xinson, 2/17/2014 Given the same two inputs of a sparse matrix A (indexed by rows, size: batch * X), and a dense matrix B (size: batch * Y), computing C = A^T * B (size: X * Y). Although we compute the transpose of A multiplied by B, the code does not perform sparse transpose and indexing at all. Instead, the work is partitioned along the columns of the result C matrix. float * output is B. float * mul_weight is C. Zero initialization/clear on C is required in advance.
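Illustration (added for clarity, using the kernel's own names): each thread owns one output column idx < output_dimension and loops over every sample; for a sample whose words are word_idx in [seg_begin, Smp_Index[sample]) and whose nonzeros are i in [Seg_Index[word_idx - 1], Seg_Index[word_idx]), it accumulates mul_weight[((word_idx - seg_begin) * Feature_dimension + Fea_Index[i]) * output_dimension + idx] += Fea_Value[i] * output[sample * output_dimension + idx]. No two threads ever touch the same element of C, so no atomics are needed; and because the kernel only accumulates with +=, C must be cleared to zero beforehand, as noted above.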
*/ void cuda_SEQ_Sparse_Matrix_Transpose_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_seq_sparse_matrix_transpose_multiply_INTEX), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, Seg_Index, Seg_Margin, Seg_Len, seg_size, Fea_Index, Fea_Value, elementsize, mul_weight, output, Feature_dimension, output_dimension,win_size); } __global__ void cuda_matrix_weightadd(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { if(keep != 0) { gpu_floats_a[idy*dimension+idx] += keep * gpu_floats_b[idy*dimension+idx] * mweight[start + idy]; } else { gpu_floats_a[idy*dimension+idx] = gpu_floats_b[idy*dimension+idx] * mweight[start + idy]; } } } void cuda_Matrix_WeightAdd(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_weightadd), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a, gpu_floats_b, batchsize, dimension, mweight,start, keep); } __global__ void cuda_matrix_weightadd_ex(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { int col_end = inver_neg_index[idy]; int col_begin = 0; if(idy > 0) { col_begin = inver_neg_index[idy - 1]; } float sum = 0; for(int i=col_begin; i<col_end; i++) { int row = inver_neg_value[i]; sum += gpu_floats_b[row * dimension + idx] * mweight[start + row]; } if(keep != 0) { gpu_floats_a[idy*dimension+idx] += keep * sum; } else { gpu_floats_a[idy*dimension+idx] =sum; } } } void cuda_Matrix_WeightAdd_EX(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_weightadd_ex), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a, gpu_floats_b, 
inver_neg_index, inver_neg_value, batchsize, dimension, mweight, start, keep); } __global__ void cuda_sparse2dense_matrix(int * Smp_Idx, int * Fea_Idx, float * Fea_Value, float * matrix, int batchsize, int outputDimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { int end = Smp_Idx[idx]; int begin = idx >= 1 ? Smp_Idx[idx - 1] : 0; for (int k = begin; k < end; k++) { matrix[idx * outputDimension + Fea_Idx[k]] = Fea_Value[k]; } } } void cuda_Sparse2Dense_Matrix(int * Smp_Idx, int * Fea_Idx, float * Fea_Value, float * matrix, int batchsize, int outputDimension) { dim3 thread_tail(DEFAULT_THREAD_PER_BLOCK); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK); hipLaunchKernelGGL(( cuda_sparse2dense_matrix), dim3(block_tail),dim3(thread_tail), 0, 0, Smp_Idx, Fea_Idx, Fea_Value, matrix, batchsize, outputDimension); } __global__ void cuda_matrix_aggragate(float * a1, float * a2, float * a3, float * b, uint32_t batchsize, uint32_t m) //uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m) { float sum = 0; for(uint32_t i=0;i<batchsize;i++) { sum += a1[i * m + idx] + a2[i * m + idx] + a3[i * m + idx]; //* alpha[alpha_index * BATCH_SIZE + i]; } b[idx] = sum; } } void cuda_Matrix_Aggragate(float * a1, float * a2, float * a3, float * b, uint32_t batchsize, uint32_t m) //, uint32_t kept, float * alpha, // uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_matrix_aggragate), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, a1,a2,a3,b,batchsize,m ); //,kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_aggragate_sup(float * a, float * b, uint32_t batchsize, uint32_t m) //uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m) { float sum = 0; for (uint32_t i = 0; i<batchsize; i++) { sum += a[i * m + idx]; //* alpha[alpha_index * BATCH_SIZE + i]; } b[idx] = sum; } } void cuda_Matrix_Aggragate_Sup(float * a, float * b, uint32_t batchsize, uint32_t m) //, uint32_t kept, float * alpha, // uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_matrix_aggragate_sup), dim3(nBlockPerGrid), dim3(DEFAULT_THREAD_PER_BLOCK), 0, 0, a, b, batchsize, m); //,kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_add_offset(float * a, uint32_t offset_a, float * b, uint32_t offset_b, int len, float mweight) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { a[offset_a + idx] += b[offset_b + idx] * mweight ; //* alpha[alpha_index * BATCH_SIZE + i]; } } void cuda_Matrix_Add_OFFSET(float * gpu_floats_a, uint32_t offset_a, float * gpu_floats_b, uint32_t offset_b, int len, float mweight) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (len + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_matrix_add_offset), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, gpu_floats_a, offset_a, gpu_floats_b, offset_b, len, mweight); } hipblasHandle_t global_handle; 
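/* hipBLAS-backed variants of the routines above. global_handle is created once by cublas_Init(), shared by every cublas_ wrapper below, and released by cublas_Destroy(). A minimal usage sketch (illustrative only; the real call sites live outside this file):
   cublas_Init();
   cublas_Matrix_Multipy(delta, weight, delta_low, batchsize, m, n, 0); // delta_low = delta x weight (pass inverse = 1 for delta x weight transposed)
   cublas_Destroy();
   All buffers are row-major, so cublas_Matrix_Multipy hands the operands to hipblasSgemm in swapped order, the usual trick for getting a row-major product out of a column-major BLAS. */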
void cublas_Init() { hipblasCreate(&global_handle); } void cublas_Destroy() { hipblasDestroy(global_handle); } void cublas_Sasum(float *x, int len, int norm, float * result) { hipblasSasum(global_handle, len , x , norm, result); } void cublas_Matrix_Multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; float al = 1.0f; float bet = 0; if(inverse == 0) { hipblasSgemm(global_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, batchsize, m, &al, weight, n, delta, m, &bet, delta_low, n); } else { hipblasSgemm(global_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, batchsize, m, &al, weight, m, delta, m, &bet, delta_low, n); } } //optimized version -- hxd & yeshen __global__ void cuda_cosine_similarity_ex_full(float * a, float * b, uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrial) { float sumxx = 0; float sumyy = 0; float sumxy = 0; float * a_iter = a + (idx * dimension); float * b_iter = b + (neg_list[idy * BATCHSIZE + idx] * dimension); float * a_iter_end = a_iter + dimension; while(a_iter < a_iter_end) { sumxx += (*a_iter) * (*a_iter); sumyy += (*b_iter) * (*b_iter); sumxy += (*a_iter++) * (*b_iter++); } c[ (idy + 1) * BATCHSIZE + idx] = (float)( sumxy / ((float)sqrtf(sumxx * sumyy) + eps) ); } } void cuda_Cosine_Similarity_EX_Full(float * a, float * b, uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrial + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_cosine_similarity_ex_full), dim3(block_tail) , dim3(thread_tail) , 0, 0, a, b, neg_list, c, nTrial, BATCHSIZE, batchsize, dimension, eps); } __global__ void cuda_fillout_dist_nce_full(float* dist, uint32_t* neg_list, uint32_t nTrail, uint32_t BATCH_SIZE, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { uint32_t mtindex = neg_list[idy * BATCH_SIZE + idx]; dist[BATCH_SIZE + idy * BATCH_SIZE + idx] = dist[mtindex]; } } void cuda_FillOut_Dist_NCE_Full(float* dist, uint32_t* neg_list, uint32_t nTrail, uint32_t BATCH_SIZE, uint32_t batchsize) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_fillout_dist_nce_full), dim3(block_tail) , dim3(thread_tail) , 0, 0, dist, neg_list, nTrail, BATCH_SIZE, batchsize); } //optimized version -- hxd & yeshen. 
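/* Descriptive note (added): cuda_deriv_cosine_ex_full handles every (negative trial idy, sample idx) pair. It recomputes the cosine terms between q[idx] and the sampled negative d[neg_list[idy * BATCHSIZE + idx]], then writes the gradient of the cosine similarity with respect to both vectors, folded with the tanh derivative (1 - x)(1 + x) and scaled by 1/batchsize, into dcq and dcd at offset idy * BATCHSIZE * m + idx * m. */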
__global__ void cuda_deriv_cosine_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = 0; float c = 0; float bc, a_bbbc, a_bccc, batchsizenorm; float * q_iter = q + idx*m; float * d_iter = d + neg_list[idy * BATCHSIZE + idx] * m; float * q_iter_end = q_iter + m; float * q_iter_P = q_iter; float * d_iter_P = d_iter; float * q_iter_end_P = q_iter_end; while(q_iter < q_iter_end) { b += (*q_iter) * (*q_iter); c += (*d_iter) * (*d_iter); a += (*q_iter++) * (*d_iter++); } b = sqrtf(b); c = sqrtf(c); bc = b*c + eps; a_bbbc = a/(b*b*b*c + eps); a_bccc = a/(b*c*c*c + eps); batchsizenorm = 1.0f / batchsize; q_iter = q_iter_P; d_iter = d_iter_P; q_iter_end = q_iter_end_P; float * dcq_iter = dcq + idy * (BATCHSIZE * m) + idx * m; float * dcd_iter = dcd + idy * (BATCHSIZE * m) + idx * m; while(q_iter < q_iter_end) { *dcq_iter++ = (1.0f - *q_iter) * ( 1.0f + *q_iter) * (*d_iter / bc - *q_iter * a_bbbc) * batchsizenorm; *dcd_iter++ = (1.0f - *d_iter) * ( 1.0f + *d_iter) * (*q_iter / bc - *d_iter * a_bccc) * batchsizenorm; ++q_iter; ++d_iter; } } } void cuda_Deriv_Cosine_EX_Full( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_cosine_ex_full), dim3(block_tail) ,dim3(thread_tail) , 0, 0, q, d, neg_list, dcq, dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_deriv_cosine_linear_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idy * BATCHSIZE + idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idy * BATCHSIZE * m + idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idy * BATCHSIZE * m + idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idy * BATCHSIZE * m + idx * m + i] = dcq[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; dcd[idy * BATCHSIZE * m + idx * m + i] = dcd[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Linear_EX_Full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 
block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_cosine_linear_ex_full), dim3(block_tail) ,dim3(thread_tail) , 0, 0, q,d,neg_list,dcq,dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_deriv_cosine_rectified_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idy * BATCHSIZE + idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(q[idx*m+i] == 0) { dcq[idy * BATCHSIZE * m + idx * m + i] = 0; } else { dcq[idy * BATCHSIZE * m + idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idy * BATCHSIZE * m + idx * m + i] = dcq[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; if(d[mIndex*m+i] == 0) { dcd[idy * BATCHSIZE * m + idx * m + i] = 0; } else { dcd[idy * BATCHSIZE * m + idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); } dcd[idy * BATCHSIZE * m + idx * m + i] = dcd[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Rectified_EX_Full( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_cosine_rectified_ex_full), dim3(block_tail) ,dim3(thread_tail) , 0, 0, q, d, neg_list, dcq, dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_matrix_weightadd_full(float * gpu_floats_a, float * gpu_floats_b, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { for(int i=0;i<nTrail;i++) { gpu_floats_a[idy*dimension+idx] += keep * gpu_floats_b[ i * BATCHSIZE * dimension + idy * dimension + idx] * mweight[start + i * BATCHSIZE + idy]; } } } /// b add to a. 
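// cuda_matrix_weightadd_full:
//   Accumulates the weighted sum of the per-trial slabs of gpu_floats_b into
//   gpu_floats_a. For each output element (row idy < batchsize, col idx < dimension):
//
//     a[idy][idx] += keep * sum_i  b[i][idy][idx] * mweight[start + i*BATCHSIZE + idy]
//
//   where i runs over the nTrail slabs of size BATCHSIZE*dimension and mweight holds
//   one scalar weight per (trial, row). keep is an integer scale applied to the whole
//   sum (e.g. a sign flip). The wrapper tiles the (dimension x batchsize) output with
//   DEFAULT_THREAD_PER_DIM x DEFAULT_THREAD_PER_DIM blocks.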
void cuda_Matrix_WeightAdd_Full(float * gpu_floats_a, float * gpu_floats_b, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_weightadd_full), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a, gpu_floats_b, nTrail, BATCHSIZE, batchsize, dimension, mweight, start, keep); } __global__ void cuda_matrix_weightadd_ex_full(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { for(int n=0; n<nTrial; n++) { int col_end = inver_neg_index[n * BATCHSIZE + idy]; int col_begin = 0; if(idy > 0) { col_begin = inver_neg_index[n * BATCHSIZE + idy - 1]; } float sum = 0; for(int i=col_begin; i<col_end; i++) { int row = inver_neg_value[n * BATCHSIZE + i]; sum += gpu_floats_b[n * BATCHSIZE * dimension + row * dimension + idx] * mweight[start + n * BATCHSIZE + row]; } gpu_floats_a[idy*dimension+idx] += keep * sum; } } } void cuda_Matrix_WeightAdd_EX_Full(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_matrix_weightadd_ex_full), dim3(block_tail) ,dim3(thread_tail) , 0, 0, gpu_floats_a, gpu_floats_b, inver_neg_index, inver_neg_value, nTrial, BATCHSIZE, batchsize, dimension, mweight, start, keep); } __global__ void cuda_cosine_similarity_subspace(float * a, float * b, float * c, uint32_t labelDim, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t subspaceDim, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < labelDim) { float sumxx = 0; float sumyy = 0; float sumxy = 0; int id_start = idx * (labelDim * subspaceDim) + idy * subspaceDim; for(uint32_t i=0;i<subspaceDim;i++) { sumxx += a[id_start + i] * a[id_start + i]; sumyy += b[id_start + i] * b[id_start + i]; sumxy += a[id_start + i] * b[id_start + i]; } c[idx * labelDim + idy] = (float)( sumxy * 1.0f / (sqrtf( (float)(sumxx * sumyy)) + eps) ); } } void cuda_Cosine_Similarity_SubSpace(float * a, float * b, float * c, uint32_t labelDim, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t subspaceDim, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + 
DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_cosine_similarity_subspace), dim3(block_tail) , dim3(thread_tail) , 0, 0, a, b, c, labelDim, BATCHSIZE, batchsize, subspaceDim, eps); } __global__ void cuda_softmax(float * a, float * b,uint32_t labelDim, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize ) { float log_sum = 0; for(int i = 0; i<labelDim; i++) { float tmpa = gamma * a[idx * labelDim + i]; if( i == 0) { log_sum = tmpa; continue; } else { if(log_sum >= tmpa) { log_sum = log_sum + logf(1 + expf(gamma * (tmpa - log_sum))); } else { log_sum = tmpa + logf(1 + expf(gamma * (log_sum - tmpa))); } } } for(int i=0;i<labelDim; i++) { float tmpa = gamma * a[idx * labelDim + i]; b[idx * labelDim + i] = expf( tmpa - log_sum); } } } void cuda_SoftMax(float * a, float * b,uint32_t labelDim, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_softmax), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, a, b, labelDim, batchsize, gamma); } __global__ void cuda_deriv_cosine_subspace(float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t labelDim, uint32_t subspaceDim, float gamma, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < labelDim) { float alpha_v = gamma * alpha[idx * labelDim + idy]; int id_start = idx * labelDim * subspaceDim + idy * subspaceDim; float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<subspaceDim;i++) { a += q[id_start + i] * d[id_start + i]; b += q[id_start + i] * q[id_start + i]; c += d[id_start + i] * d[id_start + i]; } b = sqrtf(b); c = sqrtf(c); /// tanh function. if(act_type == 0) { for(uint32_t i=0;i<subspaceDim;i++) { dcq[id_start + i] = (float)( (1 - q[id_start + i]) * ( 1 + q[id_start + i]) * (d[id_start + i] * 1.0f / (b*c) - q[id_start + i] * a * 1.0f / (b*b*b*c)) ); dcd[id_start + i] = (float)( (1 - d[id_start + i]) * ( 1 + d[id_start + i]) * (q[id_start + i] * 1.0f / (b*c) - d[id_start + i] * a * 1.0f / (b*c*c*c)) ); dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// linear function. 
else if(act_type == 1) { for(uint32_t i=0;i<subspaceDim;i++) { dcq[id_start + i] = (float)( (d[id_start + i] * 1.0f / (b*c) - q[id_start + i] * a * 1.0f / (b*b*b*c)) ); dcd[id_start + i] = (float)( (q[id_start + i] * 1.0f / (b*c) - d[id_start + i] * a * 1.0f / (b*c*c*c)) ); dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// else if(act_type == 2) { for(uint32_t i=0;i<subspaceDim;i++) { if(fabsf(q[id_start + i]) < eps) { dcq[id_start + i] = 0; } else { dcq[id_start + i] = (float)( (d[id_start + i] * 1.0f / (b*c) - q[id_start +i] * a * 1.0f / (b*b*b*c)) ); } dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; if(fabsf(d[id_start + i]) < eps) { dcd[id_start + i ] =0; } else { dcd[id_start + i] = (float)( (q[ id_start + i] * 1.0f / (b*c) - d[ id_start + i ] * a * 1.0f / (b*c*c*c)) ); } dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } } } void cuda_Deriv_Cosine_Subspace( float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t labelDim, uint32_t subspaceDim, float gamma, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_cosine_subspace), dim3(block_tail) ,dim3(thread_tail) , 0, 0, q, d, dcq, dcd, alpha, act_type, batchsize, labelDim, subspaceDim, gamma, eps); } __global__ void cuda_deriv_innerproduct(float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t Dim, float gamma, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float alpha_v = gamma * alpha[idx]; int id_start = idx * Dim; /// tanh function. if(act_type == 0) { for(uint32_t i=0;i<Dim;i++) { dcq[id_start + i] = (float)( (1 - q[id_start + i]) * ( 1 + q[id_start + i]) * d[id_start + i] * alpha_v * 1.0f ); dcd[id_start + i] = (float)( (1 - d[id_start + i]) * ( 1 + d[id_start + i]) * q[id_start + i] * alpha_v * 1.0f ); //dcq[id_start + i] = alpha_v * dcq[id_start + i] ; //dcd[id_start + i] = alpha_v * dcd[id_start + i] ; } } /// linear function. 
else if(act_type == 1) { for(uint32_t i=0;i<Dim;i++) { dcq[id_start + i] = (float)( d[id_start + i] * alpha_v * 1.0f ); dcd[id_start + i] = (float)( q[id_start + i] * alpha_v * 1.0f ); // dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; // dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// else if(act_type == 2) { for(uint32_t i=0;i<Dim;i++) { if(fabsf(q[id_start + i]) < eps) { dcq[id_start + i] = 0; } else { dcq[id_start + i] = (float)( d[id_start + i] * alpha_v * 1.0f ); } if(fabsf(d[id_start + i]) < eps) { dcd[id_start + i ] =0; } else { dcd[id_start + i] = (float)( q[id_start + i] * alpha_v * 1.0f ); } } } } } void cuda_Deriv_InnerProduct( float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t Dim, float gamma, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); //dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_deriv_innerproduct), dim3(nBlockPerGrid) ,dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, q, d, dcq, dcd, alpha, act_type, batchsize, Dim, gamma, eps); } __global__ void cuda_fillout_composite(float* data, uint32_t* feaIdx, float* compData, float* contextLT, uint32_t inputdim, uint32_t d1, uint32_t d2, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < inputdim && idy < batchsize) { if (idx < d1) { compData[idy * inputdim + idx] = data[idy * d1 + idx]; } else { uint32_t prodctfea = feaIdx[idy]; compData[idy * inputdim + idx] = contextLT[prodctfea * d2 + idx - d1]; } } } __global__ void cuda_fillout_composite_rev(float* data, float* compData, float* contextDeriv, uint32_t inputdim, uint32_t d1, uint32_t d2, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < inputdim && idy < batchsize) { if (idx < d1) { data[idy * d1 + idx] = compData[idy * inputdim + idx]; } else { contextDeriv[idy * d2 + idx - d1] = compData[idy * inputdim + idx]; } } } void cuda_FillOut_Composite(float* data, uint32_t* feaIdx, float* compData, float* context, uint32_t d1, uint32_t d2, uint32_t batchsize, uint32_t direction) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; uint32_t inputdim = d1 + d2; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((inputdim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); if (direction != 0) hipLaunchKernelGGL(( cuda_fillout_composite), dim3(block_tail), dim3(thread_tail), 0, 0, data, feaIdx, compData, context, inputdim, d1, d2, batchsize); else hipLaunchKernelGGL(( cuda_fillout_composite_rev), dim3(block_tail), dim3(thread_tail), 0, 0, data, compData, context, inputdim, d1, d2, batchsize); } __global__ void cuda_sparse_update_lookup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * 
blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension + idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } int wid = Fea_ID[idy]; int updatepos = wid*Feature_Dimension + idx; lookupt[updatepos] = lookupt[updatepos] - lr * accu; } } void cuda_Sparse_Update_Lookup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; hipLaunchKernelGGL(( cuda_sparse_update_lookup), dim3(block_tail), dim3(thread_tail), 0, 0, lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_ada(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension + idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; float tempf = adaGrad[updatepos] + accu * accu; adaGrad[updatepos] = tempf; lookupt[updatepos] = lookupt[updatepos] - (lr * accu / (sqrtf(tempf)+eps)); } } void cuda_Sparse_Update_Lookup_Ada(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; hipLaunchKernelGGL(( cuda_sparse_update_lookup_ada), dim3(block_tail), dim3(thread_tail), 0, 0, lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr, adaGrad, eps); } __global__ void cuda_sparse_update_lookup_update(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; 
t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension + idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } int wid = Fea_ID[idy]; int updatepos = wid*Feature_Dimension + idx; lookupt_update[updatepos] = lookupt_update[updatepos] + lr * accu; } } void cuda_Sparse_Update_Lookup_Update(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; hipLaunchKernelGGL(( cuda_sparse_update_lookup_update), dim3(block_tail), dim3(thread_tail) , 0, 0, lookupt_update, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; lookupt[updatepos] = lookupt[updatepos] - lr * accu; } } void cuda_Sparse_Update_Lookup_Sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_sparse_update_lookup_sup), dim3(block_tail), dim3(thread_tail), 0, 0, lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_ada_sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; float tempf = adaGrad[updatepos] + accu * accu; adaGrad[updatepos] = tempf; lookupt[updatepos] = lookupt[updatepos] - (lr * accu / (sqrtf(tempf) + eps)); } } void cuda_Sparse_Update_Lookup_Ada_Sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( 
cuda_sparse_update_lookup_ada_sup), dim3(block_tail), dim3(thread_tail), 0, 0, lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr, adaGrad, eps); } __global__ void cuda_sparse_update_lookup_update_sup(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; lookupt_update[updatepos] = lookupt_update[updatepos] + lr * accu; } } void cuda_Sparse_Update_Lookup_Update_Sup(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_sparse_update_lookup_update_sup), dim3(block_tail), dim3(thread_tail), 0, 0, lookupt_update, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr); } __global__ void cuda_init_float_array(float * target, float val, int size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < size) { target[idx] = val; } } void cuda_Init_Float_Array(float * target, float val, int size) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); //dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); hipLaunchKernelGGL(( cuda_init_float_array), dim3(nBlockPerGrid), dim3(DEFAULT_THREAD_PER_BLOCK) , 0, 0, target, val, size); } __global__ void cuda_lstm_input_batch_product(uint32_t * Word_Index, uint32_t Word_SeqLen, float * wordLT, float * weight, float * outputA, float * outputI, float * outputF, float * outputO, uint32_t Feature_dimension, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < 4*output_dimension && idy < Word_SeqLen) { uint32_t wordIdx = Word_Index[idy]; uint32_t hdim = output_dimension / 2; uint32_t matrixIdx = idx / hdim; uint32_t inmatrixIdx = idx % hdim; uint32_t startpos = matrixIdx * hdim * Feature_dimension; float sum = 0; for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * weight[startpos + i*hdim + inmatrixIdx]; } if (matrixIdx < 2) outputA[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 4) outputI[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 6) outputF[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 8) outputO[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; } } void cuda_LSTM_Input_Batch_Product(uint32_t * Word_Index, uint32_t Word_SeqLen, float * wordLT, float * weight, float * outputA, float * outputI, float * outputF, float * outputO, uint32_t Feature_dimension, uint32_t 
output_dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4*output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_input_batch_product), dim3(block_tail), dim3(thread_tail), 0, 0, Word_Index, Word_SeqLen, wordLT, weight, outputA, outputI, outputF, outputO, Feature_dimension, output_dimension); } __global__ void cuda_lstm_sequence_forward(int * Smp_Index, int batchsize, float * reweight, float * bias, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension, int blocksize) { //uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; //uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = threadIdx.x; uint32_t idy = blockIdx.y; int wordEnd = Smp_Index[idy]; int wordBegin = 0; if (idy > 0) wordBegin = Smp_Index[idy - 1]; __shared__ float _h[300]; // to-do: hard-coded, should be configurable float bias_a; float bias_i; float bias_f; float bias_o; float _c; float h; if (blockIdx.x == 0) // forward lstm cell { //load bias for forward LSTM bias_a = bias[idx]; bias_i = bias[output_dimension + idx]; bias_f = bias[2 * output_dimension + idx]; bias_o = bias[3 * output_dimension + idx]; //__syncthreads(); // make sure all bias data be loaded before computation for (int w = wordBegin; w < wordEnd; w++) { float a = outputA[output_dimension*w + idx]; float i = outputI[output_dimension*w + idx]; float f = outputF[output_dimension*w + idx]; float o = outputO[output_dimension*w + idx]; if (w > wordBegin) { for (int j = 0; j < blockDim.x; j++) { a += reweight[j*blockDim.x + idx] * _h[j]; i += reweight[2 * blocksize + j*blockDim.x + idx] * _h[j]; f += reweight[4 * blocksize + j*blockDim.x + idx] * _h[j]; o += reweight[6 * blocksize + j*blockDim.x + idx] * _h[j]; } } a += bias_a; i += bias_i; f += bias_f; o += bias_o; a = tanhf(a); i = 1.0 / (1.0 + expf(-i)); f = 1.0 / (1.0 + expf(-f)); o = 1.0 / (1.0 + expf(-o)); if (w > wordBegin) _c = i * a + f * _c; else _c = i * a; h = o * tanhf(_c); __syncthreads(); // make sure all threads have read _h before overwrite it _h[idx] = h; __syncthreads(); // make sure all writes are done before any thread read it outputC[w * output_dimension + idx] = _c; outputA[w * output_dimension + idx] = a; outputI[w * output_dimension + idx] = i; outputF[w * output_dimension + idx] = f; outputO[w * output_dimension + idx] = o; output[w * output_dimension + idx] = h; } } else { //load bias for reverse LSTM uint32_t gidx = blockDim.x + idx; bias_a = bias[gidx]; bias_i = bias[output_dimension + gidx]; bias_f = bias[2 * output_dimension + gidx]; bias_o = bias[3 * output_dimension + gidx]; //__syncthreads(); // make sure all bias data be loaded before computation for (int w = wordEnd - 1; w >= wordBegin; w--) { float a = outputA[output_dimension*w + gidx]; float i = outputI[output_dimension*w + gidx]; float f = outputF[output_dimension*w + gidx]; float o = outputO[output_dimension*w + gidx]; if (w < wordEnd - 1) { for (int j = 0; j < blockDim.x; j++) { a += reweight[blocksize + j*blockDim.x + idx] * _h[j]; i += reweight[3 * blocksize + j*blockDim.x + idx] * _h[j]; f += reweight[5 * blocksize + j*blockDim.x + idx] * _h[j]; o += reweight[7 * blocksize + j*blockDim.x + idx] * _h[j]; } } a += bias_a; i += 
bias_i; f += bias_f; o += bias_o; a = tanhf(a); i = 1.0 / (1.0 + expf(-i)); f = 1.0 / (1.0 + expf(-f)); o = 1.0 / (1.0 + expf(-o)); if (w < wordEnd - 1) _c = i * a + f * _c; else _c = i * a; h = o * tanhf(_c); __syncthreads(); // make sure all threads have read _h before overwrite it _h[idx] = h; __syncthreads(); // make sure all writes are done before any thread read it outputC[w * output_dimension + gidx] = _c; outputA[w * output_dimension + gidx] = a; outputI[w * output_dimension + gidx] = i; outputF[w * output_dimension + gidx] = f; outputO[w * output_dimension + gidx] = o; output[w * output_dimension + gidx] = h; } } } void cuda_LSTM_Sequence_Forward(int * Smp_Index, int batchsize, float * reweight, float * bias, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension) { uint32_t hdim = output_dimension / 2; dim3 thread_tail(hdim, 1); dim3 block_tail(2, batchsize); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_sequence_forward), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, reweight, bias, outputA, outputI, outputF, outputO, outputC, output, output_dimension, hdim*hdim); } __global__ void cuda_lstm_sequence_backward(int * Smp_Index, int batchsize, float * reweight, int * maxpooling_index, float * derivup, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension, int blocksize) { //uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; //uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = threadIdx.x; uint32_t idy = blockIdx.y; int wordEnd = Smp_Index[idy]; int wordBegin = 0; if (idy > 0) wordBegin = Smp_Index[idy - 1]; __shared__ float derivA[300]; // to-do: hard-coded, should be configurable __shared__ float derivI[300]; __shared__ float derivF[300]; __shared__ float derivO[300]; float _derivc, deriv_c; float derivh; float a, i, f, o, c_tanh; if (blockIdx.x == 1) // reverse lstm cell backprop { int gidx = blockDim.x + idx; int mpoolingIdx = maxpooling_index[output_dimension * idy + gidx]; for (int w = wordBegin; w < wordEnd; w++) { derivh = 0; if (mpoolingIdx == w) derivh += derivup[output_dimension * idy + gidx]; if (w > wordBegin) { for (int j = 0; j < blockDim.x; j++) { derivh += reweight[blocksize + idx*blockDim.x + j] * derivA[j]; derivh += reweight[3 * blocksize + idx*blockDim.x + j] * derivI[j]; derivh += reweight[5 * blocksize + idx*blockDim.x + j] * derivF[j]; derivh += reweight[7 * blocksize + idx*blockDim.x + j] * derivO[j]; } } c_tanh = tanhf(outputC[output_dimension*w + gidx]); o = outputO[output_dimension*w + gidx]; a = outputA[output_dimension*w + gidx]; i = outputI[output_dimension*w + gidx]; f = outputF[output_dimension*w + gidx]; float d_oinput = derivh * c_tanh * o * (1 - o); deriv_c = derivh * o * (1 + c_tanh) * (1 - c_tanh); if (w > wordBegin) deriv_c += f * _derivc; float d_finput; if (w < wordEnd - 1) d_finput = deriv_c * outputC[output_dimension*(w + 1) + gidx] * f * (1 - f); else d_finput = 0; float d_iinput = deriv_c * a * i * (1 - i); float d_ainput = deriv_c * i * (1 + a) * (1 - a); _derivc = deriv_c; outputA[output_dimension*w + gidx] = d_ainput; outputI[output_dimension*w + gidx] = d_iinput; outputF[output_dimension*w + gidx] = d_finput; outputO[output_dimension*w + gidx] = d_oinput; __syncthreads(); // make sure all threads have read _h before overwrite it 
derivA[idx] = d_ainput; derivI[idx] = d_iinput; derivF[idx] = d_finput; derivO[idx] = d_oinput; __syncthreads(); // make sure all writes are done before any thread read it } } else { //forward LSTM int mpoolingIdx = maxpooling_index[output_dimension * idy + idx]; for (int w = wordEnd - 1; w >= wordBegin; w--) { derivh = 0; if (mpoolingIdx == w) derivh += derivup[output_dimension * idy + idx]; if (w < wordEnd - 1) { for (int j = 0; j < blockDim.x; j++) { derivh += reweight[idx*blockDim.x + j] * derivA[j]; derivh += reweight[2 * blocksize + idx*blockDim.x + j] * derivI[j]; derivh += reweight[4 * blocksize + idx*blockDim.x + j] * derivF[j]; derivh += reweight[6 * blocksize + idx*blockDim.x + j] * derivO[j]; } } c_tanh = tanhf(outputC[output_dimension*w + idx]); o = outputO[output_dimension*w + idx]; a = outputA[output_dimension*w + idx]; i = outputI[output_dimension*w + idx]; f = outputF[output_dimension*w + idx]; float d_oinput = derivh * c_tanh * o * (1 - o); deriv_c = derivh * o * (1 + c_tanh) * (1 - c_tanh); if (w < wordEnd - 1) deriv_c += f * _derivc; float d_finput; if (w > wordBegin) d_finput = deriv_c * outputC[output_dimension*(w - 1) + idx] * f * (1 - f); else d_finput = 0; float d_iinput = deriv_c * a * i * (1 - i); float d_ainput = deriv_c * i * (1 + a) * (1 - a); _derivc = deriv_c; outputA[output_dimension*w + idx] = d_ainput; outputI[output_dimension*w + idx] = d_iinput; outputF[output_dimension*w + idx] = d_finput; outputO[output_dimension*w + idx] = d_oinput; __syncthreads(); // make sure all threads have read _h before overwrite it derivA[idx] = d_ainput; derivI[idx] = d_iinput; derivF[idx] = d_finput; derivO[idx] = d_oinput; __syncthreads(); // make sure all writes are done before any thread read it } } } void cuda_LSTM_Sequence_Backward(int * Smp_Index, int batchsize, float * reweight, int * maxpooling_index, float * derivup, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension) { int hdim = output_dimension / 2; dim3 thread_tail(hdim, 1); dim3 block_tail(2, batchsize); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_sequence_backward), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index, batchsize, reweight, maxpooling_index, derivup, outputA, outputI, outputF, outputO, outputC, output, output_dimension, hdim*hdim); } __global__ void cuda_lstm_weight_deriv(uint32_t * Smp_Index1, uint32_t * Smp_Index2, uint32_t * Smp_Index3, uint32_t * Word_Index1, uint32_t * Word_Index2, uint32_t * Word_Index3, uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * wordLT, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, float * h1, float * h2, float * h3, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t ylimit = b_reweight == 1 ? hdim : fea_dimension; if (idx < 4 * output_dimension && idy < ylimit) { uint32_t rev = (idx / hdim) % 2; float gradient = 0.0; uint32_t relativeIdx = idx % hdim; uint32_t maxlen = Word_SeqLen1 > Word_SeqLen2 ? 
Word_SeqLen1 : Word_SeqLen2; if (Word_SeqLen3 > maxlen) maxlen = Word_SeqLen3; float * outD1, *outD2, *outD3; uint32_t startpos = 0; if (idx < output_dimension) { outD1 = outA1; outD2 = outA2; outD3 = outA3; startpos = rev == 0 ? 0 : blocksize; } else if (idx < 2 * output_dimension) { outD1 = outI1; outD2 = outI2; outD3 = outI3; startpos = rev == 0 ? 2 * blocksize : 3 * blocksize; } else if (idx < 3 * output_dimension) { outD1 = outF1; outD2 = outF2; outD3 = outF3; startpos = rev == 0 ? 4 * blocksize : 5 * blocksize; } else { outD1 = outO1; outD2 = outO2; outD3 = outO3; startpos = rev == 0 ? 6 * blocksize : 7 * blocksize; } uint32_t smpidx1 = 0, smpidx2 = 0, smpidx3 = 0; uint32_t boundary1 = Smp_Index1[0], boundary2 = Smp_Index2[0], boundary3 = Smp_Index3[0]; uint32_t firstw1 = 1, firstw2 = 1, firstw3 = 1; for (uint32_t pos = 0; pos < maxlen; pos++) { if (pos < Word_SeqLen1) { if (firstw1 == 1) { firstw1 = 0; if (rev == 0 && (b_reweight == 1 || outD1 == outF1)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary1 - 1) /// last word of the current sentence { if (!(boundary1 == Word_SeqLen1)) { boundary1 = Smp_Index1[++smpidx1]; firstw1 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD1 == outF1)) continue; } if (b_reweight == 0) gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index1[pos] + idy]; else gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * h1[output_dimension * (rev == 1 ? (pos + 1) : (pos - 1)) + rev * hdim + idy]; } if (pos < Word_SeqLen2) { if (firstw2 == 1) { firstw2 = 0; if (rev == 0 && (b_reweight == 1 || outD2 == outF2)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary2 - 1) /// last word of the current sentence { if (!(boundary2 == Word_SeqLen2)) { boundary2 = Smp_Index2[++smpidx2]; firstw2 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD2 == outF2)) continue; } if (b_reweight == 0) gradient += outD2[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index2[pos] + idy]; else gradient += outD2[output_dimension * pos + rev * hdim + relativeIdx] * h2[output_dimension * (rev == 1 ? (pos + 1) : (pos - 1)) + rev * hdim + idy]; } if (pos < Word_SeqLen3) { if (firstw3 == 1) { firstw3 = 0; if (rev == 0 && (b_reweight == 1 || outD3 == outF3)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary3 - 1) /// last word of the current sentence { if (!(boundary3 == Word_SeqLen3)) { boundary3 = Smp_Index3[++smpidx3]; firstw3 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD3 == outF3)) continue; } if (b_reweight == 0) gradient += outD3[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index3[pos] + idy]; else gradient += outD3[output_dimension * pos + rev * hdim + relativeIdx] * h3[output_dimension * (rev == 1 ? 
(pos + 1) : (pos - 1)) + rev * hdim + idy]; } } grad[startpos + hdim * idy + relativeIdx] = gradient; } } void cuda_LSTM_Weight_Deriv(uint32_t * Smp_Index1, uint32_t * Smp_Index2, uint32_t * Smp_Index3, uint32_t * Word_Index1, uint32_t * Word_Index2, uint32_t * Word_Index3, uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * wordLT, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, float * h1, float * h2, float * h3, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight) { uint32_t hdim = output_dimension / 2; uint32_t input_dim = 0; if (b_reweight == 1) input_dim = hdim; else input_dim = fea_dimension; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4 * output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (input_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_weight_deriv), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index1, Smp_Index2, Smp_Index3, Word_Index1, Word_Index2, Word_Index3, Word_SeqLen1, Word_SeqLen2, Word_SeqLen3, wordLT, grad, outA1, outA2, outA3, outI1, outI2, outI3, outF1, outF2, outF3, outO1, outO2, outO3, h1, h2, h3, fea_dimension, output_dimension, b_reweight, hdim, hdim*hdim); } __global__ void cuda_lstm_weight_deriv_sup(uint32_t * Smp_Index1, uint32_t * Word_Index1, uint32_t Word_SeqLen1, float * wordLT, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, float * h1, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t ylimit = b_reweight == 1 ? hdim : fea_dimension; if (idx < 4 * output_dimension && idy < ylimit) { uint32_t rev = (idx / hdim) % 2; float gradient = 0.0; uint32_t relativeIdx = idx % hdim; float * outD1; uint32_t startpos = 0; if (idx < output_dimension) { outD1 = outA1; startpos = rev == 0 ? 0 : blocksize; } else if (idx < 2 * output_dimension) { outD1 = outI1; startpos = rev == 0 ? 2 * blocksize : 3 * blocksize; } else if (idx < 3 * output_dimension) { outD1 = outF1; startpos = rev == 0 ? 4 * blocksize : 5 * blocksize; } else { outD1 = outO1; startpos = rev == 0 ? 6 * blocksize : 7 * blocksize; } uint32_t smpidx1 = 0; uint32_t boundary1 = Smp_Index1[0]; uint32_t firstw1 = 1; for (uint32_t pos = 0; pos < Word_SeqLen1; pos++) { if (firstw1 == 1) { firstw1 = 0; if (rev == 0 && (b_reweight == 1 || outD1 == outF1)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary1 - 1) /// last word of the current sentence { if (!(boundary1 == Word_SeqLen1)) { boundary1 = Smp_Index1[++smpidx1]; firstw1 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD1 == outF1)) continue; } if (b_reweight == 0) gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index1[pos] + idy]; else gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * h1[output_dimension * (rev == 1 ? 
(pos + 1) : (pos - 1)) + rev * hdim + idy]; } grad[startpos + hdim * idy + relativeIdx] = gradient; } } void cuda_LSTM_Weight_Deriv_Sup(uint32_t * Smp_Index1, uint32_t * Word_Index1, uint32_t Word_SeqLen1, float * wordLT, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, float * h1, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight) { uint32_t hdim = output_dimension / 2; uint32_t input_dim = 0; if (b_reweight == 1) input_dim = hdim; else input_dim = fea_dimension; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4 * output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (input_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_weight_deriv_sup), dim3(block_tail), dim3(thread_tail), 0, 0, Smp_Index1, Word_Index1, Word_SeqLen1, wordLT, grad, outA1, outI1, outF1, outO1, h1, fea_dimension, output_dimension, b_reweight, hdim, hdim*hdim); } __global__ void cuda_lstm_bias_deriv(uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 4 * output_dimension) { float gradient = 0.0; uint32_t maxlen = Word_SeqLen1 > Word_SeqLen2 ? Word_SeqLen1 : Word_SeqLen2; if (Word_SeqLen3 > maxlen) maxlen = Word_SeqLen3; float * outD1, *outD2, *outD3; if (idx < output_dimension) { outD1 = outA1; outD2 = outA2; outD3 = outA3; } else if (idx < 2 * output_dimension) { outD1 = outI1; outD2 = outI2; outD3 = outI3; } else if (idx < 3 * output_dimension) { outD1 = outF1; outD2 = outF2; outD3 = outF3; } else { outD1 = outO1; outD2 = outO2; outD3 = outO3; } uint32_t ridx = idx % output_dimension; for (uint32_t pos = 0; pos < maxlen; pos++) { if (pos < Word_SeqLen1) gradient += outD1[output_dimension * pos + ridx]; if (pos < Word_SeqLen2) gradient += outD2[output_dimension * pos + ridx]; if (pos < Word_SeqLen3) gradient += outD3[output_dimension * pos + ridx]; } grad[idx] = gradient; } } void cuda_LSTM_Bias_Deriv(uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, uint32_t output_dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (4 * output_dimension + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_bias_deriv), dim3(nBlockPerGrid), dim3(nThreadPerBlock), 0, 0, Word_SeqLen1, Word_SeqLen2, Word_SeqLen3, grad, outA1, outA2, outA3, outI1, outI2, outI3, outF1, outF2, outF3, outO1, outO2, outO3, output_dimension); } __global__ void cuda_lstm_bias_deriv_sup(uint32_t Word_SeqLen1, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 4 * output_dimension) { float gradient = 0.0; float * outD1; if (idx < 
output_dimension) { outD1 = outA1; } else if (idx < 2 * output_dimension) { outD1 = outI1; } else if (idx < 3 * output_dimension) { outD1 = outF1; } else { outD1 = outO1; } uint32_t ridx = idx % output_dimension; for (uint32_t pos = 0; pos < Word_SeqLen1; pos++) { gradient += outD1[output_dimension * pos + ridx]; } grad[idx] = gradient; } } void cuda_LSTM_Bias_Deriv_Sup(uint32_t Word_SeqLen1, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, uint32_t output_dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (4 * output_dimension + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_bias_deriv_sup), dim3(nBlockPerGrid), dim3(nThreadPerBlock), 0, 0, Word_SeqLen1, grad, outA1, outI1, outF1, outO1, output_dimension); } __global__ void cuda_lstm_compute_wvderiv(uint32_t Word_SeqLen, float * weight, float * grad, float * outA, float * outI, float * outF, float * outO, uint32_t fea_dim, uint32_t output_dim, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < fea_dim && idy < Word_SeqLen) { float gradient = 0.0; for (uint32_t di = 0; di < output_dim; di++) { if (di < hdim) { gradient += weight[idx * hdim + di] * outA[idy * output_dim + di]; gradient += weight[blocksize * 2 + idx * hdim + di] * outI[idy * output_dim + di]; gradient += weight[blocksize * 4 + idx * hdim + di] * outF[idy * output_dim + di]; gradient += weight[blocksize * 6 + idx * hdim + di] * outO[idy * output_dim + di]; } else { gradient += weight[blocksize + idx * hdim + (di - hdim)] * outA[idy * output_dim + di]; gradient += weight[blocksize * 3 + idx * hdim + (di - hdim)] * outI[idy * output_dim + di]; gradient += weight[blocksize * 5 + idx * hdim + (di - hdim)] * outF[idy * output_dim + di]; gradient += weight[blocksize * 7 + idx * hdim + (di - hdim)] * outO[idy * output_dim + di]; } } grad[idy * fea_dim + idx] = gradient; } } void cuda_LSTM_Compute_WVDeriv(uint32_t Word_SeqLen, float * weight, float * grad, float * outA, float * outI, float * outF, float * outO, uint32_t fea_dim, uint32_t output_dim) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((fea_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; hipLaunchKernelGGL(( cuda_lstm_compute_wvderiv), dim3(block_tail), dim3(thread_tail), 0, 0, Word_SeqLen, weight, grad, outA, outI, outF, outO, fea_dim, output_dim, output_dim/2, (output_dim/2)*fea_dim); }
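// Layout notes for the bi-directional LSTM kernels above (derived from the indexing
// in cuda_lstm_input_batch_product / cuda_lstm_sequence_forward / cuda_lstm_weight_deriv):
//   * hdim = output_dimension / 2 is the per-direction hidden size and
//     blocksize = hdim * hdim is the size of one recurrent weight block.
//   * Both the input projection weights and the recurrent weights `reweight` are
//     stored as eight contiguous blocks, ordered
//       [a_fwd][a_rev][i_fwd][i_rev][f_fwd][f_rev][o_fwd][o_rev]
//     (candidate, input, forget, output gate; forward then reverse direction).
//   * cuda_lstm_sequence_forward/backward launch one thread block per
//     (direction, sentence) pair with hdim threads each, staging the previous hidden
//     state in __shared__ float _h[300] (resp. derivA..derivO[300]), so hdim must not
//     exceed 300 (see the "to-do: hard-coded" comments above).
//
// A minimal host-side sketch of one forward pass, assuming all buffers are already
// allocated on the device; the buffer names and dimensions below are hypothetical
// and not part of the original source.
#if 0
void lstm_forward_sketch(uint32_t * d_wordIdx, int * d_smpIdx,
                         uint32_t seqLen, int batchsize,
                         float * d_wordLT, float * d_inputWeight,
                         float * d_reweight, float * d_bias,
                         float * d_A, float * d_I, float * d_F, float * d_O,
                         float * d_C, float * d_H,
                         uint32_t feaDim, uint32_t outDim)
{
    // project each word embedding onto the four gate pre-activations
    cuda_LSTM_Input_Batch_Product(d_wordIdx, seqLen, d_wordLT, d_inputWeight,
                                  d_A, d_I, d_F, d_O, feaDim, outDim);
    // run the recurrence per sentence (forward and reverse halves)
    cuda_LSTM_Sequence_Forward(d_smpIdx, batchsize, d_reweight, d_bias,
                               d_A, d_I, d_F, d_O, d_C, d_H, outDim);
    hipDeviceSynchronize();  // kernel launches are asynchronous
}
#endif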
// 6720214781e58b106e41bb6915dc9feae063792c.cu
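// CUDA (nvcc) source. The kernels below cover element-wise matrix updates
// (AdaGrad and plain gradient descent, scaled add), activation helpers
// (tanh / rectified bias add), and cosine / distance derivative kernels,
// launched with the native <<<grid, block>>> syntax and using the cuBLAS headers.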
#include "stdafx.h" #ifndef __CUDACC__ #define __CUDACC__ #endif #include "device_functions.h" #include <iostream> #include <vector> #include <cuda_runtime.h> #include <cublas.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_surface_types.h> #include "device_launch_parameters.h" //device_launch_parameters.h" //#include <comutil.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "cublas_v2.h" #if defined(_WIN32) #include <comutil.h> using namespace _com_util; #pragma comment(lib, "cudart") #pragma comment(lib,"cublas.lib") #endif using namespace std; //using namespace _com_util; __global__ void cuda_matrix_ada_grad_decent(float * gpu_floats_a, float * gpu_floats_b, float * adaG, uint32_t m, uint32_t n, float lr, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { int updateIdx = idy*n + idx; float gradval = gpu_floats_b[updateIdx]; float adaval = adaG[updateIdx] + gradval * gradval; adaG[updateIdx] = adaval; gpu_floats_a[updateIdx] = gpu_floats_a[updateIdx] - (lr*gradval/(sqrtf(adaval)+eps)); } } void cuda_Matrix_Ada_Grad_Decent(float * gpu_floats_a, float * gpu_floats_b, float * adaG, uint32_t m, uint32_t n, float lr, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_ada_grad_decent<<<block_tail, thread_tail>>>(gpu_floats_a, gpu_floats_b, adaG, m, n, lr, eps); } __global__ void cuda_matrix_grad_decent(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { gpu_floats_a[idy*n + idx] = gpu_floats_a[idy*n + idx] - gpu_floats_b[idy*n + idx] * lr; } } void cuda_Matrix_Grad_Decent(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float lr) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_grad_decent<<<block_tail, thread_tail>>>(gpu_floats_a, gpu_floats_b, m, n, lr); } __global__ void cuda_matrix_add(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float mweight) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m) { gpu_floats_a[idy*n+idx] = gpu_floats_a[idy*n+idx] + gpu_floats_b[idy*n+idx] * mweight; } } void cuda_Matrix_Add(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n, float mweight) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_add<<<block_tail ,thread_tail>>>(gpu_floats_a, gpu_floats_b, m, n,mweight); } __global__ 
void cuda_matrix_add_real(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n && idy < m) { gpu_floats_a[idy*n + idx] = gpu_floats_a[idy*n + idx] - gpu_floats_b[idy*n + idx]; } } void cuda_Matrix_Add_REAL(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_add_real<<<block_tail, thread_tail>>>(gpu_floats_a, gpu_floats_b, m, n); } __global__ void cuda_scale_matrix(float * gpu_floats_a, uint32_t m, uint32_t n, float mweight) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m) { gpu_floats_a[idy * n + idx] = gpu_floats_a[idy * n + idx] * mweight; //(float)log( (float)gpu_floats_a[idx]); } } void cuda_Scale_Matrix(float * gpu_floats_a, uint32_t m, uint32_t n, float mweight) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_scale_matrix<<<block_tail ,thread_tail >>>(gpu_floats_a, m, n, mweight); } __global__ void cuda_matrix_add_tanh(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n && idy < m ) { uint32_t col = idx ; //% n; float t = gpu_floats_a[idy * n + idx] + gpu_floats_b[col]; gpu_floats_a[idy * n + idx] = tanhf(t); } } void cuda_Matrix_Add_Tanh(float * gpu_floats_a, float * gpu_floats_b, uint32_t m, uint32_t n) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_add_tanh<<<block_tail ,thread_tail >>>(gpu_floats_a,gpu_floats_b, m, n); } __global__ void cuda_matrix_add_vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize ) { gpu_floats_a[idy * dimension + idx] = gpu_floats_a[idy * dimension + idx] + gpu_floats_b[idx]; } } void cuda_Matrix_Add_Vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_add_vector<<<block_tail ,thread_tail >>>(gpu_floats_a,gpu_floats_b, batchsize, dimension); } __global__ void cuda_matrix_rectified_vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize ) 
{ gpu_floats_a[idy * dimension + idx] = gpu_floats_a[idy * dimension + idx] + gpu_floats_b[idx]; if(gpu_floats_a[idy * dimension + idx] < 0) { gpu_floats_a[idy * dimension + idx] = 0; } } } void cuda_Matrix_Rectified_Vector(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_rectified_vector<<<block_tail ,thread_tail >>>(gpu_floats_a,gpu_floats_b, batchsize, dimension); } __global__ void cuda_deriv_cosine(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (1 - q[idx * m + i]) * ( 1 + q[idx * m + i]) * (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (1 - d[idx * m + i]) * ( 1 + d[idx * m + i]) * (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine( float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK>>>(q,d,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_dis(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3*batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else { //s3 tem1 = s3[pos*m + idx]; tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } } void cuda_Deriv_Dis(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize*3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_dis<<<block_tail, thread_tail>>>(s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin); } __global__ void 
cuda_deriv_dis_linear(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3 * batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } else { //s3 tem1 = s3[pos*m + idx]; tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } } void cuda_Deriv_Dis_Linear(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize * 3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_dis_linear<<<block_tail, thread_tail>>>(s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin); } __global__ void cuda_deriv_dis_rectified(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m && idy < 3 * batchsize) { uint32_t sel = idy / batchsize; uint32_t pos = idy % batchsize; //check if there is error if (dis[pos * 2 + 1] - dis[pos * 2] >= margin) { if (sel == 0) s1deriv[pos*m + idx] = 0; else if (sel == 1) s2deriv[pos*m + idx] = 0; else s3deriv[pos*m + idx] = 0; return; } float tem1, tem2; if (sel == 0) { //s1 tem1 = s1[pos*m + idx]; if (fabsf(tem1) < eps) { s1deriv[pos*m + idx] = 0; } else { tem2 = (tem1 - s2[pos*m + idx]) / dis[pos * 2] - (tem1 - s3[pos*m + idx]) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s1deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } else if (sel == 1) { //s2 tem1 = s2[pos*m + idx]; if (fabsf(tem1) < eps) { s2deriv[pos*m + idx] = 0; } else { tem2 = (tem1 - s1[pos*m + idx]) / dis[pos * 2]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s2deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } else { //s3 tem1 = s3[pos*m + idx]; if (fabsf(tem1) < eps) { s3deriv[pos*m + idx] = 0; } else { tem2 = (s1[pos*m + idx] - tem1) / dis[pos * 2 + 1]; //tem2 = tem2 * (1 - tem1) * (1 + tem1); s3deriv[pos*m + idx] = tem2 * 1.0f / batchsize; } } } } void cuda_Deriv_Dis_Rectified(float * s1deriv, float * s2deriv, float * s3deriv, float * s1, float * s2, float * s3, float * dis, uint32_t batchsize, uint32_t m, float margin, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize * 3 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_dis_rectified << <block_tail, thread_tail 
>> >(s1deriv, s2deriv, s3deriv, s1, s2, s3, dis, batchsize, m, margin, eps); } __global__ void cuda_calc_euclideandis(float * s1, float * s2, float * s3, float * res, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 2*batchsize) { int row = idx / batchsize; // first row(0): distance between s1 and s2; second row(1): distance between s1 and s3 int col = idx % batchsize; float * s = row > 0 ? s3 : s2; float tem; float dist = eps; for (uint32_t i = 0; i<m; i++) { tem = s1[col * m + i] - s[col * m + i]; dist += tem*tem; } dist = sqrtf(dist); res[2 * col + row] = dist; } } void cuda_Calc_EuclideanDis(float * s1, float * s2, float * s3, float * res, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (2 * batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_calc_euclideandis<<<nBlockPerGrid, DEFAULT_THREAD_PER_BLOCK>>>(s1, s2, s3, res, batchsize, m, eps); } __global__ void cuda_deriv_cosine_linear(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Derive_Cosine_Linear(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine_linear<<< nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q,d,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_rectified(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[idx * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[idx * m + i] * d[idx * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(fabsf(q[idx * m + i]) < eps) { dcq[idx * m + i] = 0; } else { dcq[idx * m + i] = (float)( (d[idx*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; if(fabsf(d[idx * m + i]) < eps) { dcd[idx * m + i ] =0; } else { dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[idx*m+i] * a * 1.0f / (b*c*c*c)) ); } dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Derive_Cosine_Rectified(float * q, float * d, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine_rectified<<< nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q,d,dcq,dcd,batchsize,m,eps); } //optimized version -- hxd __global__ void cuda_deriv_cosine_ex(float * q, float * d, uint32_t * neg_list, float * 
dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = 0; float c = 0; float bc, a_bbbc, a_bccc, batchsizenorm; float * q_iter = q + idx*m; float * d_iter = d + neg_list[idx]*m; float * q_iter_end = q_iter + m; while(q_iter < q_iter_end) { b += (*q_iter) * (*q_iter); c += (*d_iter) * (*d_iter); a += (*q_iter++) * (*d_iter++); } b = sqrtf(b); c = sqrtf(c); bc = b*c + eps; a_bbbc = a/(b*b*b*c + eps); a_bccc = a/(b*c*c*c + eps); batchsizenorm = 1.0f / batchsize; q_iter = q + idx*m; d_iter = d + neg_list[idx]*m; q_iter_end = q_iter + m; float * dcq_iter = dcq + idx*m; float * dcd_iter = dcd + idx*m; while(q_iter < q_iter_end) { *dcq_iter++ = (1.0f - *q_iter) * ( 1.0f + *q_iter) * (*d_iter / bc - *q_iter * a_bbbc) * batchsizenorm; *dcd_iter++ = (1.0f - *d_iter) * ( 1.0f + *d_iter) * (*q_iter / bc - *d_iter * a_bccc) * batchsizenorm; ++q_iter; ++d_iter; } } } void cuda_Deriv_Cosine_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine_ex<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_linear_ex(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Linear_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine_linear_ex<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_cosine_rectified_ex(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(q[idx*m+i] == 0) { dcq[idx * m + i] = 0; } else { dcq[idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idx * m + i] = dcq[idx * m + i] * 1.0f / batchsize; if(d[mIndex*m+i] == 0) { dcd[idx * m + i] = 0; } else { dcd[idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); } dcd[idx * m + i] = dcd[idx * m + i] * 1.0f / 
batchsize; } } } void cuda_Deriv_Cosine_Rectified_EX( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t batchsize, uint32_t m, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_deriv_cosine_rectified_ex<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q,d,neg_list,dcq,dcd,batchsize,m,eps); } __global__ void cuda_deriv_tanh(float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m && idy < batchsize ) { delta[idy * m + idx] = delta[idy * m +idx] * (1 - layer_output[idy * m + idx]) * ( 1 + layer_output[idy * m + idx]); } } void cuda_Deriv_Tanh( float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_tanh<<<block_tail ,thread_tail >>>(delta, layer_output, batchsize, m); } __global__ void cuda_deriv_rectified(float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m && idy < batchsize ) { if(layer_output[idy * m + idx] == 0) { delta[idy * m + idx] = 0; // delta[idy * m +idx] ; } } } void cuda_Deriv_Rectified( float * delta, float * layer_output, uint32_t batchsize, uint32_t m) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_rectified<<<block_tail ,thread_tail >>>(delta, layer_output, batchsize, m); } //optimized version -- hxd __global__ void cuda_matrix_multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idx < n && idy < batchsize) { //uint32_t row = idy; // / n; //uint32_t col = idx; // % n; float sum = 0; if(inverse == 1) { float * d_iter = delta + (idy * m); float * w_iter = weight + (idx * m); float * d_end_pt = d_iter + m; while(d_iter < d_end_pt) { sum += (*d_iter++) * (*w_iter++); } } else { float * d_iter = delta + (idy * m); float * w_iter = weight + idx; float * d_end_pt = d_iter + m; while(d_iter < d_end_pt) { sum += (*d_iter++) * (*w_iter); w_iter += n; } } delta_low[idy * n + idx] = sum; } } void cuda_Matrix_Multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); 
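    // Layout note (derived from the cuda_matrix_multipy kernel): delta is batchsize x m and delta_low is batchsize x n.
    // When inverse == 0, weight is stored m x n and delta_low = delta * weight;
    // when inverse != 0, weight is stored n x m and delta_low = delta * weight^T.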
cuda_matrix_multipy<<<block_tail ,thread_tail >>>(delta, weight, delta_low, batchsize, m, n, inverse); } __global__ void cuda_cosine_similarity(float * a, float * b, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxx = 0; float sumyy = 0; float sumxy = 0; for(uint32_t i=0;i<dimension;i++) { sumxx += a[idx * dimension + i] * a[idx * dimension + i]; sumyy += b[idx * dimension + i] * b[idx * dimension + i]; sumxy += a[idx * dimension + i] * b[idx * dimension + i]; } c[mindex * BATCHSIZE + idx] = (float)( sumxy * 1.0f / (sqrtf( (float)(sumxx * sumyy)) + eps) ); } } void cuda_Cosine_Similarity(float * a, float * b, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cosine_similarity<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(a,b,c,nTrial,BATCHSIZE,mindex,batchsize, dimension, eps); } __global__ void cuda_innerproduct_similarity(float * a, float * b, float * c, uint32_t batchsize, uint32_t dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxy = 0; for(uint32_t i=0;i<dimension;i++) { sumxy += a[idx * dimension + i] * b[idx * dimension + i]; } c[idx] = (float)(sumxy * 1.0f); } } void cuda_InnerProduct_Similarity(float * a, float * b, float * c, uint32_t batchsize, uint32_t dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_innerproduct_similarity<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(a, b, c, batchsize, dimension); } //optimized version -- hxd __global__ void cuda_cosine_similarity_ex(float * a, float * b,uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sumxx = 0; float sumyy = 0; float sumxy = 0; float * a_iter = a + (idx * dimension); float * b_iter = b + (neg_list[idx] * dimension); float * a_iter_end = a_iter + dimension; while(a_iter < a_iter_end) { sumxx += (*a_iter) * (*a_iter); sumyy += (*b_iter) * (*b_iter); sumxy += (*a_iter++) * (*b_iter++); } c[mindex * BATCHSIZE + idx] = (float)( sumxy / ((float)sqrtf(sumxx * sumyy) + eps) ); } } void cuda_Cosine_Similarity_EX(float * a, float * b, uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t mindex, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cosine_similarity_ex<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(a,b,neg_list,c,nTrial,BATCHSIZE,mindex,batchsize, dimension, eps); } __global__ void cuda_cal_alpha(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } __global__ void cuda_cal_alpha_sum(float * alpha, uint32_t nTrial, uint32_t 
BATCHSIZE, uint32_t batchsize, float gamma, uint32_t init) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float sum = init; for(uint32_t i=1;i<nTrial;i++) { sum += alpha[i * BATCHSIZE + idx]; } alpha[idx] = sum; } } __global__ void cuda_cal_alpha_norm(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = (float)((gamma * alpha[row * BATCHSIZE + col + BATCHSIZE])/ alpha[col]); //expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } void cuda_Calculate_Alpha(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid_1 = ((nTrial-1)*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha<<<nBlockPerGrid_1 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma); uint32_t nBlockPerGrid_2 = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha_sum<<<nBlockPerGrid_2 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma, 1); cuda_cal_alpha_norm<<<nBlockPerGrid_1 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma); cuda_cal_alpha_sum<<<nBlockPerGrid_2 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma, 0); } __global__ void cuda_cal_alpha_norm_MXE(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < (nTrial-1)*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col + BATCHSIZE] = (float)((gamma * alpha[row * BATCHSIZE + col + BATCHSIZE])/ alpha[col]/ alpha[col]); //expf( (float)(-gamma * (alpha[col] - alpha[row * BATCHSIZE + col + BATCHSIZE]))) ; } } void cuda_Calculate_Alpha_MXE(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid_1 = ((nTrial-1)*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha<<<nBlockPerGrid_1 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma); uint32_t nBlockPerGrid_2 = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha_sum<<<nBlockPerGrid_2 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma, 1); cuda_cal_alpha_norm_MXE<<<nBlockPerGrid_1 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma); cuda_cal_alpha_sum<<<nBlockPerGrid_2 ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial,BATCHSIZE,batchsize,gamma, 0); } __global__ void cuda_cal_alpha_PAIRRANK(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float msum = 0; for(int n = 1; n < nTrial; n++) { float a = gamma * (1.0f - 1.0f / (1 + expf(- gamma * (alpha[idx] - alpha[n * BATCHSIZE + idx] )))); alpha[n * BATCHSIZE + idx] = a; msum += a; } alpha[idx] = msum; } } void cuda_Calculate_Alpha_PAIRRANK(float * alpha, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; 
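    // Pairwise ranking gradient (see cuda_cal_alpha_PAIRRANK above): for each negative trial n,
    // alpha[n * BATCHSIZE + idx] becomes gamma * (1 - sigmoid(gamma * (score_pos - score_neg))),
    // and alpha[idx] is overwritten with the sum of those terms over all negatives.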
cuda_cal_alpha_PAIRRANK<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, nTrial, BATCHSIZE, batchsize, gamma); } __global__ void cuda_cal_alpha_nce(float * alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { alpha[idx] = gamma - gamma / (1.0f + (nTrial - 1) * expf(dist[idx] - gamma * alpha[idx] + gamma)); //+gamma is from hxd, sd doesn't have this } else if(idx < nTrial*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; alpha[row * BATCHSIZE + col] = gamma / (1.0f + (nTrial - 1) * expf(dist[row * BATCHSIZE + col] - gamma * alpha[row * BATCHSIZE + col] + gamma)); //+gamma is from hxd, sd doesn't have this } } void cuda_Calculate_Alpha_NCE(float* alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (nTrial*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha_nce<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, dist, nTrial,BATCHSIZE,batchsize,gamma); } __global__ void cuda_cal_alpha_nce2(float * alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < nTrial*batchsize) { uint32_t row = idx / batchsize; uint32_t col = idx % batchsize; float s = 1.0f / (1.0f + (nTrial - 1) * expf(dist[row * BATCHSIZE + col] - gamma * alpha[row * BATCHSIZE + col] + gamma)); //+gamma is from hxd, sd doesn't have this alpha[row * BATCHSIZE + col] = gamma * s * (1.0f - s); } } void cuda_Calculate_Alpha_NCE2(float* alpha, float* dist, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (nTrial*batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_cal_alpha_nce2<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(alpha, dist, nTrial,BATCHSIZE,batchsize,gamma); } __global__ void cuda_fillout_dist_nce(float* dist, uint32_t* neg_list, uint32_t nTrailPlus1, uint32_t BATCH_SIZE, uint32_t mindex, uint32_t batchsize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { uint32_t mtindex = neg_list[idx]; dist[mindex * BATCH_SIZE + idx] = dist[mtindex]; } } void cuda_FillOut_Dist_NCE(float* dist, uint32_t* neg_list, uint32_t nTrailPlus1, uint32_t BATCH_SIZE, uint32_t mindex, uint32_t batchsize) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_fillout_dist_nce<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(dist, neg_list, nTrailPlus1, BATCH_SIZE, mindex, batchsize); } //optimized version -- hxd __global__ void cuda_matrix_product(float * a1, float * b1, float * a2, float * b2, float * a3, float * b3, float * c, uint32_t batchsize, uint32_t m, uint32_t n) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idx < n && idy < m ) { float sum = 0; for (uint32_t i = 0; i < batchsize; i++) { sum += a1[m*i + idy] * b1[n*i + idx]; sum += a2[m*i + idy] * b2[n*i + idx]; sum += a3[m*i + idy] * b3[n*i + idx]; } //uint32_t row = idy; // / n; //uint32_t col = idx;// % n; //float *a_iter = a+row; //float *b_iter = b+col; //float *a_end_pt = a_iter + (m*batchsize); //while(a_iter < a_end_pt) //{ // sum += (*a_iter) * (*b_iter); // a_iter += m; // b_iter 
+= n; //} c[idy * n + idx] = sum; } } void cuda_Matrix_Product(float * a1, float * b1, float * a2, float * b2, float * a3, float * b3, float * c, uint32_t batchsize, uint32_t m, uint32_t n) //, uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_product<<<block_tail, thread_tail>>>(a1, b1, a2, b2, a3, b3, c, batchsize, m, n); //, kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_product_sup(float * a, float * b, float * c, uint32_t batchsize, uint32_t m, uint32_t n) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < n && idy < m) { float sum = 0; for (uint32_t i = 0; i < batchsize; i++) { sum += a[m*i + idy] * b[n*i + idx]; } //uint32_t row = idy; // / n; //uint32_t col = idx;// % n; //float *a_iter = a+row; //float *b_iter = b+col; //float *a_end_pt = a_iter + (m*batchsize); //while(a_iter < a_end_pt) //{ // sum += (*a_iter) * (*b_iter); // a_iter += m; // b_iter += n; //} c[idy * n + idx] = sum; } } void cuda_Matrix_Product_Sup(float * a, float * b, float * c, uint32_t batchsize, uint32_t m, uint32_t n) //, uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (m + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_product_sup<<<block_tail, thread_tail>>>(a, b, c, batchsize, m, n); //, kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_convolution_matrix_product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < weightDim) { float sum = 0; int target_word1, target_word2, target_word3, widx1, widx2, widx3, wordpos, offset, precompIdx; wordpos = idy / Feature_Dimension; offset = idy % Feature_Dimension; for(int b=0;b<batchsize;b++) { precompIdx = b * output_dimension + idx; target_word1 = maxpooling_index1[precompIdx]; target_word2 = maxpooling_index2[precompIdx]; target_word3 = maxpooling_index3[precompIdx]; int widx1 = Word_Index1[target_word1 + wordpos]; int widx2 = Word_Index2[target_word2 + wordpos]; int widx3 = Word_Index3[target_word3 + wordpos]; sum += deriv1[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; sum += deriv2[precompIdx] * wordLT[Feature_Dimension * widx2 + offset]; sum += deriv3[precompIdx] * wordLT[Feature_Dimension * widx3 + offset]; } grad[idy * output_dimension + idx] = sum; } } void cuda_Convolution_Matrix_Product_INTEX(float * deriv1, int 
* maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * win_size; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_convolution_matrix_product_INTEX<<<block_tail, thread_tail>>>(deriv1, maxpooling_index1, deriv2, maxpooling_index2, deriv3, maxpooling_index3, wordLT, Word_Index1, Word_Index2, Word_Index3, win_size, batchsize, output_dimension, grad, Feature_Dimension, weightDim); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_convolution_matrix_product_sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < weightDim) { float sum = 0; int target_word1, widx1, wordpos, offset, precompIdx; wordpos = idy / Feature_Dimension; offset = idy % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idx; target_word1 = maxpooling_index[precompIdx]; int widx1 = Word_Index[target_word1 + wordpos]; sum += deriv[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; } grad[idy * output_dimension + idx] = sum; } } void cuda_Convolution_Matrix_Product_Sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * win_size; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_convolution_matrix_product_sup<<<block_tail, thread_tail>>>(deriv, maxpooling_index, wordLT, Word_Index, win_size, batchsize, output_dimension, grad, Feature_Dimension, weightDim); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_multiconv_matrix_product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim, int currOuputDim, int pastOutdim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < weightDim && idy < currOuputDim) { float sum = 0; int target_word1, target_word2, target_word3, widx1, widx2, widx3, wordpos, offset, precompIdx; wordpos = idx / Feature_Dimension; offset = idx % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idy + pastOutdim; target_word1 = maxpooling_index1[precompIdx]; target_word2 
= maxpooling_index2[precompIdx]; target_word3 = maxpooling_index3[precompIdx]; int widx1 = Word_Index1[target_word1 + wordpos]; int widx2 = Word_Index2[target_word2 + wordpos]; int widx3 = Word_Index3[target_word3 + wordpos]; sum += deriv1[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; sum += deriv2[precompIdx] * wordLT[Feature_Dimension * widx2 + offset]; sum += deriv3[precompIdx] * wordLT[Feature_Dimension * widx3 + offset]; } grad[idy * weightDim + idx] = sum; } } void cuda_MultiConv_Matrix_Product_INTEX(float * deriv1, int * maxpooling_index1, float * deriv2, int * maxpooling_index2, float * deriv3, int * maxpooling_index3, float * wordLT, int * Word_Index1, int * Word_Index2, int * Word_Index3, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize, int fmsize, int accu, int accu_para) // the last two pointers are on host //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * winsize; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (fmsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_multiconv_matrix_product_INTEX<<<block_tail, thread_tail>>>(deriv1, maxpooling_index1, deriv2, maxpooling_index2, deriv3, maxpooling_index3, wordLT, Word_Index1, Word_Index2, Word_Index3, winsize, batchsize, output_dimension, (grad + accu_para), Feature_Dimension, weightDim, fmsize, accu); } __global__ void cuda_multiconv_matrix_product_sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int win_size, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int weightDim, int currOuputDim, int pastOutdim) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < weightDim && idy < currOuputDim) { float sum = 0; int target_word1, widx1, wordpos, offset, precompIdx; wordpos = idx / Feature_Dimension; offset = idx % Feature_Dimension; for (int b = 0; b<batchsize; b++) { precompIdx = b * output_dimension + idy + pastOutdim; target_word1 = maxpooling_index[precompIdx]; int widx1 = Word_Index[target_word1 + wordpos]; sum += deriv[precompIdx] * wordLT[Feature_Dimension * widx1 + offset]; } grad[idy * weightDim + idx] = sum; } } void cuda_MultiConv_Matrix_Product_Sup(float * deriv, int * maxpooling_index, float * wordLT, int * Word_Index, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize, int fmsize, int accu, int accu_para) // the last two pointers are on host //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { int weightDim = Feature_Dimension * winsize; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((weightDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (fmsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_multiconv_matrix_product_sup<<<block_tail, thread_tail>>>(deriv, maxpooling_index, wordLT, Word_Index, winsize, batchsize, output_dimension, (grad + accu_para), Feature_Dimension, weightDim, fmsize, accu); } __global__ void cuda_multiconv_compute_wvderiv(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int * winsizes, int * fmsizes) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) 
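// Scatter-style gradient for the word lookup table (derived from the body below): each thread owns one
// (feature-dimension idx, sample idy) pair, walks over all output feature maps, locates the filter group
// of each map via fmsizes/winsizes, and accumulates deriv * weight into grad at the window position
// recorded in maxpooling_index.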
{ uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < Feature_Dimension && idy < batchsize) { int currFilterset = 0, counter = 0, accuoffset = 0, currweightDim; float cacheDeriv; int wordIdx, i; for (int b = 0; b < output_dimension; b++) { if (counter >= fmsizes[currFilterset]) { counter = 0; accuoffset += Feature_Dimension * winsizes[currFilterset] * fmsizes[currFilterset]; currFilterset++; } currweightDim = Feature_Dimension * winsizes[currFilterset]; cacheDeriv = deriv[idy*output_dimension + b]; wordIdx = maxpooling_index[idy*output_dimension + b]; for (i = 0; i < winsizes[currFilterset]; i++) { grad[(wordIdx + i)*Feature_Dimension + idx] += cacheDeriv * weight[accuoffset + counter*currweightDim + (i*Feature_Dimension + idx)]; } counter++; } } } void cuda_MultiConv_Compute_WVDERIV(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int * winsizes, int * fmsizes) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_multiconv_compute_wvderiv<<<block_tail, thread_tail>>>(deriv, maxpooling_index, weight, batchsize, output_dimension, grad, Feature_Dimension, winsizes, fmsizes); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_conv_compute_wvderiv(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < Feature_Dimension && idy < batchsize) { float cacheDeriv; int wordIdx, i; for (int b = 0; b < output_dimension; b++) { cacheDeriv = deriv[idy*output_dimension + b]; wordIdx = maxpooling_index[idy*output_dimension + b]; for (i = 0; i < winsize; i++) { grad[(wordIdx + i)*Feature_Dimension + idx] += cacheDeriv * weight[(i*Feature_Dimension + idx)*output_dimension + b]; } } } } void cuda_Conv_Compute_WVDERIV(float * deriv, int * maxpooling_index, float * weight, int batchsize, int output_dimension, float * grad, int Feature_Dimension, int winsize) //,float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_conv_compute_wvderiv<<<block_tail, thread_tail>>>(deriv, maxpooling_index, weight, batchsize, output_dimension, grad, Feature_Dimension, winsize); //,alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_convolution_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < Word_SeqLen) { uint32_t mSmp_idx = Word_Margin[idy]; uint32_t wordEnd = Smp_Index[mSmp_idx]; uint32_t wordBegin = 0; if (mSmp_idx > 0) 
wordBegin = Smp_Index[mSmp_idx - 1]; if (idy >= wordBegin && idy <= (wordEnd - win_size)) { output[idy * output_dimension + idx] = 0; float sum = 0; for (int w = 0; w < win_size; w++) { uint32_t wordIdx = Word_Index[idy + w]; // get its vector from word lookup table for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * con_weight[(w * Feature_dimension + i)*output_dimension + idx]; } } output[idy * output_dimension + idx] = sum; } } } void cuda_Convolution_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_convolution_matrix_multiply_INTEX<<<block_tail, thread_tail>>>(Smp_Index, batchsize, Word_Index, Word_Margin, Word_SeqLen, wordLT, con_weight, output, Feature_dimension, output_dimension, win_size); } __global__ void cuda_multiconv_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t * win_sizes, uint32_t * fm_sizes) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < output_dimension && idy < Word_SeqLen) { int filterClass = 0; uint32_t idx_offset = idx; uint32_t weightOffset = 0; while (idx_offset >= fm_sizes[filterClass]) { weightOffset += Feature_dimension * win_sizes[filterClass] * fm_sizes[filterClass]; idx_offset = idx_offset - fm_sizes[filterClass]; filterClass++; } uint32_t win_size = win_sizes[filterClass]; uint32_t mSmp_idx = Word_Margin[idy]; uint32_t wordEnd = Smp_Index[mSmp_idx]; uint32_t wordBegin = 0; if (mSmp_idx > 0) wordBegin = Smp_Index[mSmp_idx - 1]; if (idy >= wordBegin && idy <= (wordEnd - win_size)) { output[idy * output_dimension + idx] = 0; float sum = 0; uint32_t woffset = weightOffset + idx_offset * (win_size * Feature_dimension); for (int w = 0; w < win_size; w++) { uint32_t wordIdx = Word_Index[idy + w]; // get its vector from word lookup table for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * con_weight[woffset + w * Feature_dimension + i]; } } output[idy * output_dimension + idx] = sum; } } } void cuda_MultiConv_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Word_Index, uint32_t * Word_Margin, uint32_t Word_SeqLen, float * wordLT, float * con_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t * win_sizes, uint32_t * fm_sizes) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_multiconv_matrix_multiply_INTEX<<<block_tail, 
thread_tail>>>(Smp_Index, batchsize, Word_Index, Word_Margin, Word_SeqLen, wordLT, con_weight, output, Feature_dimension, output_dimension, win_sizes, fm_sizes); } __global__ void cuda_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int win_size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if(idy < batchsize && idx < output_dimension) { //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - win_size; uint32_t col_begin = 0; if(idy > 0) { col_begin = Smp_Index[idy-1]; } float max_value = 0; int max_index = -1; for(uint32_t i=col_begin;i<=col_end; i++) { if(max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value ) { max_value = pooling_feas[i * output_dimension + idx]; max_index = i; } } output[idy * output_dimension + idx] = max_value; maxpooling_index[idy * output_dimension + idx] = max_index; } } void cuda_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output,int * maxpooling_index, int output_dimension, int win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_max_pooling<<<block_tail, thread_tail>>>(pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension, win_size); } __global__ void cuda_multi_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int * win_sizes, int * fm_sizes) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idy < batchsize && idx < output_dimension) { int filterClass = 0; uint32_t idx_offset = idx; while (idx_offset >= fm_sizes[filterClass]) { idx_offset = idx_offset - fm_sizes[filterClass]; filterClass++; } uint32_t win_size = win_sizes[filterClass]; //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - win_size; uint32_t col_begin = 0; if (idy > 0) { col_begin = Smp_Index[idy - 1]; } float max_value = 0; int max_index = -1; for (uint32_t i = col_begin; i <= col_end; i++) { if (max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value) { max_value = pooling_feas[i * output_dimension + idx]; max_index = i; } } output[idy * output_dimension + idx] = max_value; maxpooling_index[idy * output_dimension + idx] = max_index; } } void cuda_Multi_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension, int * win_sizes, int * fm_sizes) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_multi_max_pooling<<<block_tail, thread_tail>>>(pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension, win_sizes, fm_sizes); } __global__ void cuda_lstm_max_pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idy < batchsize && idx < output_dimension) { //output[idy * output_dimension + idx] = 0; uint32_t col_end = Smp_Index[idy] - 1; uint32_t 
col_begin = 0;
        if (idy > 0)
        {
            col_begin = Smp_Index[idy - 1];
        }
        float max_value = 0;
        int max_index = -1;
        for (uint32_t i = col_begin; i <= col_end; i++)
        {
            if (max_index == -1 || pooling_feas[i * output_dimension + idx] > max_value)
            {
                max_value = pooling_feas[i * output_dimension + idx];
                max_index = i;
            }
        }
        output[idy * output_dimension + idx] = max_value;
        maxpooling_index[idy * output_dimension + idx] = max_index;
    }
}

void cuda_LSTM_Max_Pooling(float * pooling_feas, int * Smp_Index, int batchsize, float * output, int * maxpooling_index, int output_dimension)
{
    dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
    dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM);
    cuda_lstm_max_pooling<<<block_tail, thread_tail>>>(pooling_feas, Smp_Index, batchsize, output, maxpooling_index, output_dimension);
}

__global__ void cuda_seq_sparse_matrix_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size)
{
    uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y;
    if(idx < output_dimension && idy < batchsize)
    {
        uint32_t seg_end = Smp_Index[idy];
        uint32_t seg_begin = 0;
        if(idy > 0)
        {
            seg_begin = Smp_Index[idy - 1];
        }
        float sum = 0;
        for(uint32_t word_idx = seg_begin; word_idx < seg_end; ++word_idx)
        {
            uint32_t col_end = Seg_Index[word_idx];
            uint32_t col_begin = 0;
            if(word_idx > 0)
            {
                col_begin = Seg_Index[word_idx - 1];
            }
            for(uint32_t i = col_begin; i < col_end; ++i)
            {
                uint32_t fea_idx = Fea_Index[i];
                sum += Fea_Value[i] * mul_weight[((word_idx - seg_begin) * Feature_dimension + fea_idx) * output_dimension + idx];
            }
        }
        output[idy * output_dimension + idx] = sum;
    }
}

/*
Added by xinson, 2/17/2014
This version still computes a sparse matrix (batch * input) multiplied by a dense matrix. However, each
row of the sparse matrix is more than just a BOW; it is a sequence of BOWs. Put another way, the sparse
matrix has exactly the same structure as what is used in Convolutional_Sparse_Matrix_Multiply_INTEX.
As a result, the dense matrix (mul_weight) is of size (Feature_dimension * win_size) * output_dimension,
where win_size is the number of words per input sequence instance. Note that all inputs must have exactly
the same number of words. One word is represented as an instance of BOW.
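For illustration only (hypothetical values, not taken from the code): with batchsize = 2, win_size = 3 and
every sample holding exactly 3 words, Smp_Index = {3, 6} (cumulative word counts per sample). If word 0 has
2 active features and word 1 has 1, Seg_Index begins {2, 3, ...} (cumulative non-zero counts per word), and
Fea_Index / Fea_Value list those non-zeros consecutively. mul_weight is laid out row-major as
(win_size * Feature_dimension) rows by output_dimension columns, so word w of a sample reads the rows
[w * Feature_dimension, (w + 1) * Feature_dimension).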
*/
void cuda_SEQ_Sparse_Matrix_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size)
{
    dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM);
    dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM);
    //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK;
    //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK;
    cuda_seq_sparse_matrix_multiply_INTEX<<<block_tail, thread_tail>>>(Smp_Index, batchsize, Seg_Index, Seg_Margin, Seg_Len, seg_size, Fea_Index, Fea_Value, elementsize, mul_weight, output, Feature_dimension, output_dimension, win_size);
}

__global__ void cuda_seq_sparse_matrix_transpose_multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size)
{
    uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < output_dimension)
    {
        uint32_t seg_begin = 0;
        for(uint32_t sample = 0; sample < batchsize; ++sample)
        {
            uint32_t seg_end = Smp_Index[sample];
            float sum = 0;
            for(uint32_t word_idx = seg_begin; word_idx < seg_end; ++word_idx)
            {
                uint32_t col_end = Seg_Index[word_idx];
                uint32_t col_begin = 0;
                if(word_idx > 0)
                {
                    col_begin = Seg_Index[word_idx - 1];
                }
                for(uint32_t i = col_begin; i < col_end; ++i)
                {
                    uint32_t fea_idx = Fea_Index[i];
                    mul_weight[((word_idx - seg_begin) * Feature_dimension + fea_idx) * output_dimension + idx] += Fea_Value[i] * output[sample * output_dimension + idx];
                }
            }
            seg_begin = seg_end;
        }
    }
}

/*
Added by xinson, 2/17/2014
Given the same two inputs, a sparse matrix A (indexed by rows, size: batch * X) and a dense matrix B
(size: batch * Y), this computes C = A^T * B (size: X * Y). Although we compute the transpose of A
multiplied by B, the code does not perform any sparse transpose or re-indexing. Instead, the work is
partitioned along the columns of the result matrix C.
float * output is B. float * mul_weight is C. C must be zero-initialized (cleared) in advance.
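A minimal host-side sketch of the intended call pattern (assumed, not part of the original source; the
cudaMemset size follows the (win_size * Feature_dimension) x output_dimension layout described above):

    cudaMemset(mul_weight, 0, win_size * Feature_dimension * output_dimension * sizeof(float));
    cuda_SEQ_Sparse_Matrix_Transpose_Multiply_INTEX(Smp_Index, batchsize, Seg_Index, Seg_Margin,
        Seg_Len, seg_size, Fea_Index, Fea_Value, elementsize, mul_weight, output,
        Feature_dimension, output_dimension, win_size);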
*/ void cuda_SEQ_Sparse_Matrix_Transpose_Multiply_INTEX(uint32_t * Smp_Index, uint32_t batchsize, uint32_t * Seg_Index, uint32_t * Seg_Margin, float * Seg_Len, uint32_t seg_size, uint32_t * Fea_Index, float * Fea_Value, uint32_t elementsize, float * mul_weight, float * output, uint32_t Feature_dimension, uint32_t output_dimension, uint32_t win_size) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM); dim3 block_tail((output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_seq_sparse_matrix_transpose_multiply_INTEX<<<block_tail, thread_tail>>>(Smp_Index, batchsize, Seg_Index, Seg_Margin, Seg_Len, seg_size, Fea_Index, Fea_Value, elementsize, mul_weight, output, Feature_dimension, output_dimension,win_size); } __global__ void cuda_matrix_weightadd(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { if(keep != 0) { gpu_floats_a[idy*dimension+idx] += keep * gpu_floats_b[idy*dimension+idx] * mweight[start + idy]; } else { gpu_floats_a[idy*dimension+idx] = gpu_floats_b[idy*dimension+idx] * mweight[start + idy]; } } } void cuda_Matrix_WeightAdd(float * gpu_floats_a, float * gpu_floats_b, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_weightadd<<<block_tail ,thread_tail >>>(gpu_floats_a, gpu_floats_b, batchsize, dimension, mweight,start, keep); } __global__ void cuda_matrix_weightadd_ex(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { int col_end = inver_neg_index[idy]; int col_begin = 0; if(idy > 0) { col_begin = inver_neg_index[idy - 1]; } float sum = 0; for(int i=col_begin; i<col_end; i++) { int row = inver_neg_value[i]; sum += gpu_floats_b[row * dimension + idx] * mweight[start + row]; } if(keep != 0) { gpu_floats_a[idy*dimension+idx] += keep * sum; } else { gpu_floats_a[idy*dimension+idx] =sum; } } } void cuda_Matrix_WeightAdd_EX(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_weightadd_ex<<<block_tail ,thread_tail >>>(gpu_floats_a, gpu_floats_b, inver_neg_index, inver_neg_value, batchsize, dimension, mweight, start, keep); } __global__ void 
cuda_sparse2dense_matrix(int * Smp_Idx, int * Fea_Idx, float * Fea_Value, float * matrix, int batchsize, int outputDimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { int end = Smp_Idx[idx]; int begin = idx >= 1 ? Smp_Idx[idx - 1] : 0; for (int k = begin; k < end; k++) { matrix[idx * outputDimension + Fea_Idx[k]] = Fea_Value[k]; } } } void cuda_Sparse2Dense_Matrix(int * Smp_Idx, int * Fea_Idx, float * Fea_Value, float * matrix, int batchsize, int outputDimension) { dim3 thread_tail(DEFAULT_THREAD_PER_BLOCK); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK); cuda_sparse2dense_matrix<<<block_tail,thread_tail>>>(Smp_Idx, Fea_Idx, Fea_Value, matrix, batchsize, outputDimension); } __global__ void cuda_matrix_aggragate(float * a1, float * a2, float * a3, float * b, uint32_t batchsize, uint32_t m) //uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < m) { float sum = 0; for(uint32_t i=0;i<batchsize;i++) { sum += a1[i * m + idx] + a2[i * m + idx] + a3[i * m + idx]; //* alpha[alpha_index * BATCH_SIZE + i]; } b[idx] = sum; } } void cuda_Matrix_Aggragate(float * a1, float * a2, float * a3, float * b, uint32_t batchsize, uint32_t m) //, uint32_t kept, float * alpha, // uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_matrix_aggragate<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(a1,a2,a3,b,batchsize,m ); //,kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_aggragate_sup(float * a, float * b, uint32_t batchsize, uint32_t m) //uint32_t kept, float * alpha, uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < m) { float sum = 0; for (uint32_t i = 0; i<batchsize; i++) { sum += a[i * m + idx]; //* alpha[alpha_index * BATCH_SIZE + i]; } b[idx] = sum; } } void cuda_Matrix_Aggragate_Sup(float * a, float * b, uint32_t batchsize, uint32_t m) //, uint32_t kept, float * alpha, // uint32_t ntrial, uint32_t BATCH_SIZE, uint32_t alpha_index) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (m + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_matrix_aggragate_sup<<<nBlockPerGrid, DEFAULT_THREAD_PER_BLOCK>>>(a, b, batchsize, m); //,kept, alpha, ntrial, BATCH_SIZE, alpha_index); } __global__ void cuda_matrix_add_offset(float * a, uint32_t offset_a, float * b, uint32_t offset_b, int len, float mweight) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { a[offset_a + idx] += b[offset_b + idx] * mweight ; //* alpha[alpha_index * BATCH_SIZE + i]; } } void cuda_Matrix_Add_OFFSET(float * gpu_floats_a, uint32_t offset_a, float * gpu_floats_b, uint32_t offset_b, int len, float mweight) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (len + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_matrix_add_offset<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(gpu_floats_a, offset_a, gpu_floats_b, offset_b, len, mweight); } cublasHandle_t global_handle; void cublas_Init() { cublasCreate(&global_handle); } void cublas_Destroy() { cublasDestroy(global_handle); } void cublas_Sasum(float *x, int len, int norm, float * result) { cublasSasum(global_handle, len , x , norm, result); } void 
cublas_Matrix_Multipy(float * delta, float * weight, float * delta_low, uint32_t batchsize, uint32_t m, uint32_t n, uint32_t inverse) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; float al = 1.0f; float bet = 0; if(inverse == 0) { cublasSgemm(global_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, batchsize, m, &al, weight, n, delta, m, &bet, delta_low, n); } else { cublasSgemm(global_handle, CUBLAS_OP_T, CUBLAS_OP_N, n, batchsize, m, &al, weight, m, delta, m, &bet, delta_low, n); } } //optimized version -- hxd & yeshen __global__ void cuda_cosine_similarity_ex_full(float * a, float * b, uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrial) { float sumxx = 0; float sumyy = 0; float sumxy = 0; float * a_iter = a + (idx * dimension); float * b_iter = b + (neg_list[idy * BATCHSIZE + idx] * dimension); float * a_iter_end = a_iter + dimension; while(a_iter < a_iter_end) { sumxx += (*a_iter) * (*a_iter); sumyy += (*b_iter) * (*b_iter); sumxy += (*a_iter++) * (*b_iter++); } c[ (idy + 1) * BATCHSIZE + idx] = (float)( sumxy / ((float)sqrtf(sumxx * sumyy) + eps) ); } } void cuda_Cosine_Similarity_EX_Full(float * a, float * b, uint32_t * neg_list, float * c, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrial + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_cosine_similarity_ex_full<<<block_tail , thread_tail >>>(a, b, neg_list, c, nTrial, BATCHSIZE, batchsize, dimension, eps); } __global__ void cuda_fillout_dist_nce_full(float* dist, uint32_t* neg_list, uint32_t nTrail, uint32_t BATCH_SIZE, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { uint32_t mtindex = neg_list[idy * BATCH_SIZE + idx]; dist[BATCH_SIZE + idy * BATCH_SIZE + idx] = dist[mtindex]; } } void cuda_FillOut_Dist_NCE_Full(float* dist, uint32_t* neg_list, uint32_t nTrail, uint32_t BATCH_SIZE, uint32_t batchsize) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_fillout_dist_nce_full<<<block_tail , thread_tail >>>(dist, neg_list, nTrail, BATCH_SIZE, batchsize); } //optimized version -- hxd & yeshen. 
__global__ void cuda_deriv_cosine_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = 0; float c = 0; float bc, a_bbbc, a_bccc, batchsizenorm; float * q_iter = q + idx*m; float * d_iter = d + neg_list[idy * BATCHSIZE + idx] * m; float * q_iter_end = q_iter + m; float * q_iter_P = q_iter; float * d_iter_P = d_iter; float * q_iter_end_P = q_iter_end; while(q_iter < q_iter_end) { b += (*q_iter) * (*q_iter); c += (*d_iter) * (*d_iter); a += (*q_iter++) * (*d_iter++); } b = sqrtf(b); c = sqrtf(c); bc = b*c + eps; a_bbbc = a/(b*b*b*c + eps); a_bccc = a/(b*c*c*c + eps); batchsizenorm = 1.0f / batchsize; q_iter = q_iter_P; d_iter = d_iter_P; q_iter_end = q_iter_end_P; float * dcq_iter = dcq + idy * (BATCHSIZE * m) + idx * m; float * dcd_iter = dcd + idy * (BATCHSIZE * m) + idx * m; while(q_iter < q_iter_end) { *dcq_iter++ = (1.0f - *q_iter) * ( 1.0f + *q_iter) * (*d_iter / bc - *q_iter * a_bbbc) * batchsizenorm; *dcd_iter++ = (1.0f - *d_iter) * ( 1.0f + *d_iter) * (*q_iter / bc - *d_iter * a_bccc) * batchsizenorm; ++q_iter; ++d_iter; } } } void cuda_Deriv_Cosine_EX_Full( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_cosine_ex_full<<<block_tail ,thread_tail >>>(q, d, neg_list, dcq, dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_deriv_cosine_linear_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idy * BATCHSIZE + idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { dcq[idy * BATCHSIZE * m + idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); dcd[idy * BATCHSIZE * m + idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); dcq[idy * BATCHSIZE * m + idx * m + i] = dcq[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; dcd[idy * BATCHSIZE * m + idx * m + i] = dcd[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Linear_EX_Full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 
1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_cosine_linear_ex_full<<<block_tail ,thread_tail >>>(q,d,neg_list,dcq,dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_deriv_cosine_rectified_ex_full(float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < nTrail) { float a = 0; float b = eps; float c = eps; uint32_t mIndex = neg_list[idy * BATCHSIZE + idx]; for(uint32_t i=0;i<m;i++) { a += q[idx * m + i] * d[mIndex * m + i]; b += q[idx * m + i] * q[idx * m + i]; c += d[mIndex * m + i] * d[mIndex * m + i]; } b = sqrtf(b); c = sqrtf(c); for(uint32_t i=0;i<m;i++) { if(q[idx*m+i] == 0) { dcq[idy * BATCHSIZE * m + idx * m + i] = 0; } else { dcq[idy * BATCHSIZE * m + idx * m + i] = (float)( (d[mIndex*m+i] * 1.0f / (b*c) - q[idx*m+i] * a * 1.0f / (b*b*b*c)) ); } dcq[idy * BATCHSIZE * m + idx * m + i] = dcq[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; if(d[mIndex*m+i] == 0) { dcd[idy * BATCHSIZE * m + idx * m + i] = 0; } else { dcd[idy * BATCHSIZE * m + idx * m + i] = (float)( (q[idx*m+i] * 1.0f / (b*c) - d[mIndex*m+i] * a * 1.0f / (b*c*c*c)) ); } dcd[idy * BATCHSIZE * m + idx * m + i] = dcd[idy * BATCHSIZE * m + idx * m + i] * 1.0f / batchsize; } } } void cuda_Deriv_Cosine_Rectified_EX_Full( float * q, float * d, uint32_t * neg_list, float * dcq, float * dcd, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t m, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( nTrail + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_cosine_rectified_ex_full<<<block_tail ,thread_tail >>>(q, d, neg_list, dcq, dcd, nTrail, BATCHSIZE, batchsize, m, eps); } __global__ void cuda_matrix_weightadd_full(float * gpu_floats_a, float * gpu_floats_b, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { for(int i=0;i<nTrail;i++) { gpu_floats_a[idy*dimension+idx] += keep * gpu_floats_b[ i * BATCHSIZE * dimension + idy * dimension + idx] * mweight[start + i * BATCHSIZE + idy]; } } } /// b add to a. 
void cuda_Matrix_WeightAdd_Full(float * gpu_floats_a, float * gpu_floats_b, uint32_t nTrail, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_weightadd_full<<<block_tail ,thread_tail >>>(gpu_floats_a, gpu_floats_b, nTrail, BATCHSIZE, batchsize, dimension, mweight, start, keep); } __global__ void cuda_matrix_weightadd_ex_full(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < dimension && idy < batchsize) { for(int n=0; n<nTrial; n++) { int col_end = inver_neg_index[n * BATCHSIZE + idy]; int col_begin = 0; if(idy > 0) { col_begin = inver_neg_index[n * BATCHSIZE + idy - 1]; } float sum = 0; for(int i=col_begin; i<col_end; i++) { int row = inver_neg_value[n * BATCHSIZE + i]; sum += gpu_floats_b[n * BATCHSIZE * dimension + row * dimension + idx] * mweight[start + n * BATCHSIZE + row]; } gpu_floats_a[idy*dimension+idx] += keep * sum; } } } void cuda_Matrix_WeightAdd_EX_Full(float * gpu_floats_a, float * gpu_floats_b, int * inver_neg_index, int * inver_neg_value, uint32_t nTrial, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t dimension, float * mweight, int start, int keep) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_matrix_weightadd_ex_full<<<block_tail ,thread_tail >>>(gpu_floats_a, gpu_floats_b, inver_neg_index, inver_neg_value, nTrial, BATCHSIZE, batchsize, dimension, mweight, start, keep); } __global__ void cuda_cosine_similarity_subspace(float * a, float * b, float * c, uint32_t labelDim, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t subspaceDim, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < labelDim) { float sumxx = 0; float sumyy = 0; float sumxy = 0; int id_start = idx * (labelDim * subspaceDim) + idy * subspaceDim; for(uint32_t i=0;i<subspaceDim;i++) { sumxx += a[id_start + i] * a[id_start + i]; sumyy += b[id_start + i] * b[id_start + i]; sumxy += a[id_start + i] * b[id_start + i]; } c[idx * labelDim + idy] = (float)( sumxy * 1.0f / (sqrtf( (float)(sumxx * sumyy)) + eps) ); } } void cuda_Cosine_Similarity_SubSpace(float * a, float * b, float * c, uint32_t labelDim, uint32_t BATCHSIZE, uint32_t batchsize, uint32_t subspaceDim, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM 
- 1) / DEFAULT_THREAD_PER_DIM); cuda_cosine_similarity_subspace<<<block_tail , thread_tail >>>(a, b, c, labelDim, BATCHSIZE, batchsize, subspaceDim, eps); } __global__ void cuda_softmax(float * a, float * b,uint32_t labelDim, uint32_t batchsize, float gamma) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize ) { float log_sum = 0; for(int i = 0; i<labelDim; i++) { float tmpa = gamma * a[idx * labelDim + i]; if( i == 0) { log_sum = tmpa; continue; } else { if(log_sum >= tmpa) { log_sum = log_sum + logf(1 + expf(gamma * (tmpa - log_sum))); } else { log_sum = tmpa + logf(1 + expf(gamma * (log_sum - tmpa))); } } } for(int i=0;i<labelDim; i++) { float tmpa = gamma * a[idx * labelDim + i]; b[idx * labelDim + i] = expf( tmpa - log_sum); } } } void cuda_SoftMax(float * a, float * b,uint32_t labelDim, uint32_t batchsize, float gamma) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_softmax<<<nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(a, b, labelDim, batchsize, gamma); } __global__ void cuda_deriv_cosine_subspace(float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t labelDim, uint32_t subspaceDim, float gamma, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize && idy < labelDim) { float alpha_v = gamma * alpha[idx * labelDim + idy]; int id_start = idx * labelDim * subspaceDim + idy * subspaceDim; float a = 0; float b = eps; float c = eps; for(uint32_t i=0;i<subspaceDim;i++) { a += q[id_start + i] * d[id_start + i]; b += q[id_start + i] * q[id_start + i]; c += d[id_start + i] * d[id_start + i]; } b = sqrtf(b); c = sqrtf(c); /// tanh function. if(act_type == 0) { for(uint32_t i=0;i<subspaceDim;i++) { dcq[id_start + i] = (float)( (1 - q[id_start + i]) * ( 1 + q[id_start + i]) * (d[id_start + i] * 1.0f / (b*c) - q[id_start + i] * a * 1.0f / (b*b*b*c)) ); dcd[id_start + i] = (float)( (1 - d[id_start + i]) * ( 1 + d[id_start + i]) * (q[id_start + i] * 1.0f / (b*c) - d[id_start + i] * a * 1.0f / (b*c*c*c)) ); dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// linear function. 
else if(act_type == 1) { for(uint32_t i=0;i<subspaceDim;i++) { dcq[id_start + i] = (float)( (d[id_start + i] * 1.0f / (b*c) - q[id_start + i] * a * 1.0f / (b*b*b*c)) ); dcd[id_start + i] = (float)( (q[id_start + i] * 1.0f / (b*c) - d[id_start + i] * a * 1.0f / (b*c*c*c)) ); dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// else if(act_type == 2) { for(uint32_t i=0;i<subspaceDim;i++) { if(fabsf(q[id_start + i]) < eps) { dcq[id_start + i] = 0; } else { dcq[id_start + i] = (float)( (d[id_start + i] * 1.0f / (b*c) - q[id_start +i] * a * 1.0f / (b*b*b*c)) ); } dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; if(fabsf(d[id_start + i]) < eps) { dcd[id_start + i ] =0; } else { dcd[id_start + i] = (float)( (q[ id_start + i] * 1.0f / (b*c) - d[ id_start + i ] * a * 1.0f / (b*c*c*c)) ); } dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } } } void cuda_Deriv_Cosine_Subspace( float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t labelDim, uint32_t subspaceDim, float gamma, float eps) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_cosine_subspace<<< block_tail ,thread_tail >>>(q, d, dcq, dcd, alpha, act_type, batchsize, labelDim, subspaceDim, gamma, eps); } __global__ void cuda_deriv_innerproduct(float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t Dim, float gamma, float eps) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < batchsize) { float alpha_v = gamma * alpha[idx]; int id_start = idx * Dim; /// tanh function. if(act_type == 0) { for(uint32_t i=0;i<Dim;i++) { dcq[id_start + i] = (float)( (1 - q[id_start + i]) * ( 1 + q[id_start + i]) * d[id_start + i] * alpha_v * 1.0f ); dcd[id_start + i] = (float)( (1 - d[id_start + i]) * ( 1 + d[id_start + i]) * q[id_start + i] * alpha_v * 1.0f ); //dcq[id_start + i] = alpha_v * dcq[id_start + i] ; //dcd[id_start + i] = alpha_v * dcd[id_start + i] ; } } /// linear function. 
else if(act_type == 1) { for(uint32_t i=0;i<Dim;i++) { dcq[id_start + i] = (float)( d[id_start + i] * alpha_v * 1.0f ); dcd[id_start + i] = (float)( q[id_start + i] * alpha_v * 1.0f ); // dcq[id_start + i] = alpha_v * dcq[id_start + i] * 1.0f / batchsize; // dcd[id_start + i] = alpha_v * dcd[id_start + i] * 1.0f / batchsize; } } /// else if(act_type == 2) { for(uint32_t i=0;i<Dim;i++) { if(fabsf(q[id_start + i]) < eps) { dcq[id_start + i] = 0; } else { dcq[id_start + i] = (float)( d[id_start + i] * alpha_v * 1.0f ); } if(fabsf(d[id_start + i]) < eps) { dcd[id_start + i ] =0; } else { dcd[id_start + i] = (float)( q[id_start + i] * alpha_v * 1.0f ); } } } } } void cuda_Deriv_InnerProduct( float * q, float * d, float * dcq, float * dcd, float * alpha, uint32_t act_type, uint32_t batchsize, uint32_t Dim, float gamma, float eps) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); //dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_deriv_innerproduct<<< nBlockPerGrid ,DEFAULT_THREAD_PER_BLOCK >>>(q, d, dcq, dcd, alpha, act_type, batchsize, Dim, gamma, eps); } __global__ void cuda_fillout_composite(float* data, uint32_t* feaIdx, float* compData, float* contextLT, uint32_t inputdim, uint32_t d1, uint32_t d2, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < inputdim && idy < batchsize) { if (idx < d1) { compData[idy * inputdim + idx] = data[idy * d1 + idx]; } else { uint32_t prodctfea = feaIdx[idy]; compData[idy * inputdim + idx] = contextLT[prodctfea * d2 + idx - d1]; } } } __global__ void cuda_fillout_composite_rev(float* data, float* compData, float* contextDeriv, uint32_t inputdim, uint32_t d1, uint32_t d2, uint32_t batchsize) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < inputdim && idy < batchsize) { if (idx < d1) { data[idy * d1 + idx] = compData[idy * inputdim + idx]; } else { contextDeriv[idy * d2 + idx - d1] = compData[idy * inputdim + idx]; } } } void cuda_FillOut_Composite(float* data, uint32_t* feaIdx, float* compData, float* context, uint32_t d1, uint32_t d2, uint32_t batchsize, uint32_t direction) { //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = (batchsize + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; uint32_t inputdim = d1 + d2; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((inputdim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); if (direction != 0) cuda_fillout_composite<<<block_tail, thread_tail>>>(data, feaIdx, compData, context, inputdim, d1, d2, batchsize); else cuda_fillout_composite_rev<<<block_tail, thread_tail>>>(data, compData, context, inputdim, d1, d2, batchsize); } __global__ void cuda_sparse_update_lookup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; 
if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension + idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } int wid = Fea_ID[idy]; int updatepos = wid*Feature_Dimension + idx; lookupt[updatepos] = lookupt[updatepos] - lr * accu; } } void cuda_Sparse_Update_Lookup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; cuda_sparse_update_lookup<<<block_tail, thread_tail>>>(lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_ada(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension + idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; float tempf = adaGrad[updatepos] + accu * accu; adaGrad[updatepos] = tempf; lookupt[updatepos] = lookupt[updatepos] - (lr * accu / (sqrtf(tempf)+eps)); } } void cuda_Sparse_Update_Lookup_Ada(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; cuda_sparse_update_lookup_ada<<<block_tail, thread_tail>>>(lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr, adaGrad, eps); } __global__ void cuda_sparse_update_lookup_update(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int sq1sq2, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { int tidx = Seq[t]; if (tidx < seq1size) { accu += ltDeriv1[tidx*Feature_Dimension + idx]; } else if (tidx < sq1sq2) { accu += ltDeriv2[(tidx - seq1size)*Feature_Dimension 
+ idx]; } else { accu += ltDeriv3[(tidx - sq1sq2)*Feature_Dimension + idx]; } } int wid = Fea_ID[idy]; int updatepos = wid*Feature_Dimension + idx; lookupt_update[updatepos] = lookupt_update[updatepos] + lr * accu; } } void cuda_Sparse_Update_Lookup_Update(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv1, float * ltDeriv2, float * ltDeriv3, int seq1size, int seq2size, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); int sq1sq2 = seq1size + seq2size; cuda_sparse_update_lookup_update<<<block_tail, thread_tail >>>(lookupt_update, Fea_ID, Fea_Idx, Seq, ltDeriv1, ltDeriv2, ltDeriv3, seq1size, sq1sq2, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; lookupt[updatepos] = lookupt[updatepos] - lr * accu; } } void cuda_Sparse_Update_Lookup_Sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_sparse_update_lookup_sup<<<block_tail, thread_tail>>>(lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr); } __global__ void cuda_sparse_update_lookup_ada_sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; float tempf = adaGrad[updatepos] + accu * accu; adaGrad[updatepos] = tempf; lookupt[updatepos] = lookupt[updatepos] - (lr * accu / (sqrtf(tempf) + eps)); } } void cuda_Sparse_Update_Lookup_Ada_Sup(float * lookupt, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr, float * adaGrad, float eps) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_sparse_update_lookup_ada_sup<<<block_tail, thread_tail>>>(lookupt, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr, adaGrad, eps); } __global__ void cuda_sparse_update_lookup_update_sup(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int 
Feature_Dimension, float lr) { uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < Feature_Dimension && idy < IDnum) { int colend = Fea_Idx[idy]; int colbegin = 0; if (idy > 0) colbegin = Fea_Idx[idy - 1]; float accu = 0; for (int t = colbegin; t < colend; t++) { accu += ltDeriv[Seq[t] * Feature_Dimension + idx]; } //int wid = Fea_ID[idy]; int updatepos = Fea_ID[idy] * Feature_Dimension + idx; lookupt_update[updatepos] = lookupt_update[updatepos] + lr * accu; } } void cuda_Sparse_Update_Lookup_Update_Sup(float * lookupt_update, int * Fea_ID, int * Fea_Idx, int * Seq, float * ltDeriv, int IDnum, int Feature_Dimension, float lr) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((Feature_Dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (IDnum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_sparse_update_lookup_update_sup<<<block_tail, thread_tail>>>(lookupt_update, Fea_ID, Fea_Idx, Seq, ltDeriv, IDnum, Feature_Dimension, lr); } __global__ void cuda_init_float_array(float * target, float val, int size) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < size) { target[idx] = val; } } void cuda_Init_Float_Array(float * target, float val, int size) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (size + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //dim3 thread_tail(DEFAULT_THREAD_PER_DIM,DEFAULT_THREAD_PER_DIM); //dim3 block_tail((batchsize + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, ( labelDim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); cuda_init_float_array<<< nBlockPerGrid, DEFAULT_THREAD_PER_BLOCK >>>(target, val, size); } __global__ void cuda_lstm_input_batch_product(uint32_t * Word_Index, uint32_t Word_SeqLen, float * wordLT, float * weight, float * outputA, float * outputI, float * outputF, float * outputO, uint32_t Feature_dimension, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < 4*output_dimension && idy < Word_SeqLen) { uint32_t wordIdx = Word_Index[idy]; uint32_t hdim = output_dimension / 2; uint32_t matrixIdx = idx / hdim; uint32_t inmatrixIdx = idx % hdim; uint32_t startpos = matrixIdx * hdim * Feature_dimension; float sum = 0; for (uint32_t i = 0; i < Feature_dimension; i++) { sum += wordLT[wordIdx*Feature_dimension + i] * weight[startpos + i*hdim + inmatrixIdx]; } if (matrixIdx < 2) outputA[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 4) outputI[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 6) outputF[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; else if (matrixIdx < 8) outputO[idy * output_dimension + (matrixIdx % 2) * hdim + inmatrixIdx] = sum; } } void cuda_LSTM_Input_Batch_Product(uint32_t * Word_Index, uint32_t Word_SeqLen, float * wordLT, float * weight, float * outputA, float * outputI, float * outputF, float * outputO, uint32_t Feature_dimension, uint32_t output_dimension) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4*output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; 
cuda_lstm_input_batch_product<<<block_tail, thread_tail>>>(Word_Index, Word_SeqLen, wordLT, weight, outputA, outputI, outputF, outputO, Feature_dimension, output_dimension); } __global__ void cuda_lstm_sequence_forward(int * Smp_Index, int batchsize, float * reweight, float * bias, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension, int blocksize) { //uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; //uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = threadIdx.x; uint32_t idy = blockIdx.y; int wordEnd = Smp_Index[idy]; int wordBegin = 0; if (idy > 0) wordBegin = Smp_Index[idy - 1]; __shared__ float _h[300]; // to-do: hard-coded, should be configurable float bias_a; float bias_i; float bias_f; float bias_o; float _c; float h; if (blockIdx.x == 0) // forward lstm cell { //load bias for forward LSTM bias_a = bias[idx]; bias_i = bias[output_dimension + idx]; bias_f = bias[2 * output_dimension + idx]; bias_o = bias[3 * output_dimension + idx]; //__syncthreads(); // make sure all bias data be loaded before computation for (int w = wordBegin; w < wordEnd; w++) { float a = outputA[output_dimension*w + idx]; float i = outputI[output_dimension*w + idx]; float f = outputF[output_dimension*w + idx]; float o = outputO[output_dimension*w + idx]; if (w > wordBegin) { for (int j = 0; j < blockDim.x; j++) { a += reweight[j*blockDim.x + idx] * _h[j]; i += reweight[2 * blocksize + j*blockDim.x + idx] * _h[j]; f += reweight[4 * blocksize + j*blockDim.x + idx] * _h[j]; o += reweight[6 * blocksize + j*blockDim.x + idx] * _h[j]; } } a += bias_a; i += bias_i; f += bias_f; o += bias_o; a = tanhf(a); i = 1.0 / (1.0 + expf(-i)); f = 1.0 / (1.0 + expf(-f)); o = 1.0 / (1.0 + expf(-o)); if (w > wordBegin) _c = i * a + f * _c; else _c = i * a; h = o * tanhf(_c); __syncthreads(); // make sure all threads have read _h before overwrite it _h[idx] = h; __syncthreads(); // make sure all writes are done before any thread read it outputC[w * output_dimension + idx] = _c; outputA[w * output_dimension + idx] = a; outputI[w * output_dimension + idx] = i; outputF[w * output_dimension + idx] = f; outputO[w * output_dimension + idx] = o; output[w * output_dimension + idx] = h; } } else { //load bias for reverse LSTM uint32_t gidx = blockDim.x + idx; bias_a = bias[gidx]; bias_i = bias[output_dimension + gidx]; bias_f = bias[2 * output_dimension + gidx]; bias_o = bias[3 * output_dimension + gidx]; //__syncthreads(); // make sure all bias data be loaded before computation for (int w = wordEnd - 1; w >= wordBegin; w--) { float a = outputA[output_dimension*w + gidx]; float i = outputI[output_dimension*w + gidx]; float f = outputF[output_dimension*w + gidx]; float o = outputO[output_dimension*w + gidx]; if (w < wordEnd - 1) { for (int j = 0; j < blockDim.x; j++) { a += reweight[blocksize + j*blockDim.x + idx] * _h[j]; i += reweight[3 * blocksize + j*blockDim.x + idx] * _h[j]; f += reweight[5 * blocksize + j*blockDim.x + idx] * _h[j]; o += reweight[7 * blocksize + j*blockDim.x + idx] * _h[j]; } } a += bias_a; i += bias_i; f += bias_f; o += bias_o; a = tanhf(a); i = 1.0 / (1.0 + expf(-i)); f = 1.0 / (1.0 + expf(-f)); o = 1.0 / (1.0 + expf(-o)); if (w < wordEnd - 1) _c = i * a + f * _c; else _c = i * a; h = o * tanhf(_c); __syncthreads(); // make sure all threads have read _h before overwrite it _h[idx] = h; __syncthreads(); // make sure all writes are done before any thread read it outputC[w * output_dimension + gidx] = _c; outputA[w * 
output_dimension + gidx] = a; outputI[w * output_dimension + gidx] = i; outputF[w * output_dimension + gidx] = f; outputO[w * output_dimension + gidx] = o; output[w * output_dimension + gidx] = h; } } } void cuda_LSTM_Sequence_Forward(int * Smp_Index, int batchsize, float * reweight, float * bias, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension) { uint32_t hdim = output_dimension / 2; dim3 thread_tail(hdim, 1); dim3 block_tail(2, batchsize); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_sequence_forward<<<block_tail, thread_tail>>>(Smp_Index, batchsize, reweight, bias, outputA, outputI, outputF, outputO, outputC, output, output_dimension, hdim*hdim); } __global__ void cuda_lstm_sequence_backward(int * Smp_Index, int batchsize, float * reweight, int * maxpooling_index, float * derivup, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension, int blocksize) { //uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; //uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t idx = threadIdx.x; uint32_t idy = blockIdx.y; int wordEnd = Smp_Index[idy]; int wordBegin = 0; if (idy > 0) wordBegin = Smp_Index[idy - 1]; __shared__ float derivA[300]; // to-do: hard-coded, should be configurable __shared__ float derivI[300]; __shared__ float derivF[300]; __shared__ float derivO[300]; float _derivc, deriv_c; float derivh; float a, i, f, o, c_tanh; if (blockIdx.x == 1) // reverse lstm cell backprop { int gidx = blockDim.x + idx; int mpoolingIdx = maxpooling_index[output_dimension * idy + gidx]; for (int w = wordBegin; w < wordEnd; w++) { derivh = 0; if (mpoolingIdx == w) derivh += derivup[output_dimension * idy + gidx]; if (w > wordBegin) { for (int j = 0; j < blockDim.x; j++) { derivh += reweight[blocksize + idx*blockDim.x + j] * derivA[j]; derivh += reweight[3 * blocksize + idx*blockDim.x + j] * derivI[j]; derivh += reweight[5 * blocksize + idx*blockDim.x + j] * derivF[j]; derivh += reweight[7 * blocksize + idx*blockDim.x + j] * derivO[j]; } } c_tanh = tanhf(outputC[output_dimension*w + gidx]); o = outputO[output_dimension*w + gidx]; a = outputA[output_dimension*w + gidx]; i = outputI[output_dimension*w + gidx]; f = outputF[output_dimension*w + gidx]; float d_oinput = derivh * c_tanh * o * (1 - o); deriv_c = derivh * o * (1 + c_tanh) * (1 - c_tanh); if (w > wordBegin) deriv_c += f * _derivc; float d_finput; if (w < wordEnd - 1) d_finput = deriv_c * outputC[output_dimension*(w + 1) + gidx] * f * (1 - f); else d_finput = 0; float d_iinput = deriv_c * a * i * (1 - i); float d_ainput = deriv_c * i * (1 + a) * (1 - a); _derivc = deriv_c; outputA[output_dimension*w + gidx] = d_ainput; outputI[output_dimension*w + gidx] = d_iinput; outputF[output_dimension*w + gidx] = d_finput; outputO[output_dimension*w + gidx] = d_oinput; __syncthreads(); // make sure all threads have read _h before overwrite it derivA[idx] = d_ainput; derivI[idx] = d_iinput; derivF[idx] = d_finput; derivO[idx] = d_oinput; __syncthreads(); // make sure all writes are done before any thread read it } } else { //forward LSTM int mpoolingIdx = maxpooling_index[output_dimension * idy + idx]; for (int w = wordEnd - 1; w >= wordBegin; w--) { derivh = 0; if (mpoolingIdx == w) derivh += derivup[output_dimension * idy + idx]; if (w < wordEnd - 1) { for (int j = 0; j < blockDim.x; j++) { derivh += 
reweight[idx*blockDim.x + j] * derivA[j]; derivh += reweight[2 * blocksize + idx*blockDim.x + j] * derivI[j]; derivh += reweight[4 * blocksize + idx*blockDim.x + j] * derivF[j]; derivh += reweight[6 * blocksize + idx*blockDim.x + j] * derivO[j]; } } c_tanh = tanhf(outputC[output_dimension*w + idx]); o = outputO[output_dimension*w + idx]; a = outputA[output_dimension*w + idx]; i = outputI[output_dimension*w + idx]; f = outputF[output_dimension*w + idx]; float d_oinput = derivh * c_tanh * o * (1 - o); deriv_c = derivh * o * (1 + c_tanh) * (1 - c_tanh); if (w < wordEnd - 1) deriv_c += f * _derivc; float d_finput; if (w > wordBegin) d_finput = deriv_c * outputC[output_dimension*(w - 1) + idx] * f * (1 - f); else d_finput = 0; float d_iinput = deriv_c * a * i * (1 - i); float d_ainput = deriv_c * i * (1 + a) * (1 - a); _derivc = deriv_c; outputA[output_dimension*w + idx] = d_ainput; outputI[output_dimension*w + idx] = d_iinput; outputF[output_dimension*w + idx] = d_finput; outputO[output_dimension*w + idx] = d_oinput; __syncthreads(); // make sure all threads have read _h before overwrite it derivA[idx] = d_ainput; derivI[idx] = d_iinput; derivF[idx] = d_finput; derivO[idx] = d_oinput; __syncthreads(); // make sure all writes are done before any thread read it } } } void cuda_LSTM_Sequence_Backward(int * Smp_Index, int batchsize, float * reweight, int * maxpooling_index, float * derivup, float * outputA, float * outputI, float * outputF, float * outputO, float * outputC, float * output, int output_dimension) { int hdim = output_dimension / 2; dim3 thread_tail(hdim, 1); dim3 block_tail(2, batchsize); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_sequence_backward<<<block_tail, thread_tail>>>(Smp_Index, batchsize, reweight, maxpooling_index, derivup, outputA, outputI, outputF, outputO, outputC, output, output_dimension, hdim*hdim); } __global__ void cuda_lstm_weight_deriv(uint32_t * Smp_Index1, uint32_t * Smp_Index2, uint32_t * Smp_Index3, uint32_t * Word_Index1, uint32_t * Word_Index2, uint32_t * Word_Index3, uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * wordLT, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, float * h1, float * h2, float * h3, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t ylimit = b_reweight == 1 ? hdim : fea_dimension; if (idx < 4 * output_dimension && idy < ylimit) { uint32_t rev = (idx / hdim) % 2; float gradient = 0.0; uint32_t relativeIdx = idx % hdim; uint32_t maxlen = Word_SeqLen1 > Word_SeqLen2 ? Word_SeqLen1 : Word_SeqLen2; if (Word_SeqLen3 > maxlen) maxlen = Word_SeqLen3; float * outD1, *outD2, *outD3; uint32_t startpos = 0; if (idx < output_dimension) { outD1 = outA1; outD2 = outA2; outD3 = outA3; startpos = rev == 0 ? 0 : blocksize; } else if (idx < 2 * output_dimension) { outD1 = outI1; outD2 = outI2; outD3 = outI3; startpos = rev == 0 ? 2 * blocksize : 3 * blocksize; } else if (idx < 3 * output_dimension) { outD1 = outF1; outD2 = outF2; outD3 = outF3; startpos = rev == 0 ? 4 * blocksize : 5 * blocksize; } else { outD1 = outO1; outD2 = outO2; outD3 = outO3; startpos = rev == 0 ? 
6 * blocksize : 7 * blocksize; } uint32_t smpidx1 = 0, smpidx2 = 0, smpidx3 = 0; uint32_t boundary1 = Smp_Index1[0], boundary2 = Smp_Index2[0], boundary3 = Smp_Index3[0]; uint32_t firstw1 = 1, firstw2 = 1, firstw3 = 1; for (uint32_t pos = 0; pos < maxlen; pos++) { if (pos < Word_SeqLen1) { if (firstw1 == 1) { firstw1 = 0; if (rev == 0 && (b_reweight == 1 || outD1 == outF1)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary1 - 1) /// last word of the current sentence { if (!(boundary1 == Word_SeqLen1)) { boundary1 = Smp_Index1[++smpidx1]; firstw1 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD1 == outF1)) continue; } if (b_reweight == 0) gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index1[pos] + idy]; else gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * h1[output_dimension * (rev == 1 ? (pos + 1) : (pos - 1)) + rev * hdim + idy]; } if (pos < Word_SeqLen2) { if (firstw2 == 1) { firstw2 = 0; if (rev == 0 && (b_reweight == 1 || outD2 == outF2)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary2 - 1) /// last word of the current sentence { if (!(boundary2 == Word_SeqLen2)) { boundary2 = Smp_Index2[++smpidx2]; firstw2 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD2 == outF2)) continue; } if (b_reweight == 0) gradient += outD2[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index2[pos] + idy]; else gradient += outD2[output_dimension * pos + rev * hdim + relativeIdx] * h2[output_dimension * (rev == 1 ? (pos + 1) : (pos - 1)) + rev * hdim + idy]; } if (pos < Word_SeqLen3) { if (firstw3 == 1) { firstw3 = 0; if (rev == 0 && (b_reweight == 1 || outD3 == outF3)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary3 - 1) /// last word of the current sentence { if (!(boundary3 == Word_SeqLen3)) { boundary3 = Smp_Index3[++smpidx3]; firstw3 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD3 == outF3)) continue; } if (b_reweight == 0) gradient += outD3[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index3[pos] + idy]; else gradient += outD3[output_dimension * pos + rev * hdim + relativeIdx] * h3[output_dimension * (rev == 1 ? 
(pos + 1) : (pos - 1)) + rev * hdim + idy]; } } grad[startpos + hdim * idy + relativeIdx] = gradient; } } void cuda_LSTM_Weight_Deriv(uint32_t * Smp_Index1, uint32_t * Smp_Index2, uint32_t * Smp_Index3, uint32_t * Word_Index1, uint32_t * Word_Index2, uint32_t * Word_Index3, uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * wordLT, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, float * h1, float * h2, float * h3, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight) { uint32_t hdim = output_dimension / 2; uint32_t input_dim = 0; if (b_reweight == 1) input_dim = hdim; else input_dim = fea_dimension; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4 * output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (input_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_weight_deriv<<<block_tail, thread_tail>>>(Smp_Index1, Smp_Index2, Smp_Index3, Word_Index1, Word_Index2, Word_Index3, Word_SeqLen1, Word_SeqLen2, Word_SeqLen3, wordLT, grad, outA1, outA2, outA3, outI1, outI2, outI3, outF1, outF2, outF3, outO1, outO2, outO3, h1, h2, h3, fea_dimension, output_dimension, b_reweight, hdim, hdim*hdim); } __global__ void cuda_lstm_weight_deriv_sup(uint32_t * Smp_Index1, uint32_t * Word_Index1, uint32_t Word_SeqLen1, float * wordLT, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, float * h1, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; uint32_t ylimit = b_reweight == 1 ? hdim : fea_dimension; if (idx < 4 * output_dimension && idy < ylimit) { uint32_t rev = (idx / hdim) % 2; float gradient = 0.0; uint32_t relativeIdx = idx % hdim; float * outD1; uint32_t startpos = 0; if (idx < output_dimension) { outD1 = outA1; startpos = rev == 0 ? 0 : blocksize; } else if (idx < 2 * output_dimension) { outD1 = outI1; startpos = rev == 0 ? 2 * blocksize : 3 * blocksize; } else if (idx < 3 * output_dimension) { outD1 = outF1; startpos = rev == 0 ? 4 * blocksize : 5 * blocksize; } else { outD1 = outO1; startpos = rev == 0 ? 6 * blocksize : 7 * blocksize; } uint32_t smpidx1 = 0; uint32_t boundary1 = Smp_Index1[0]; uint32_t firstw1 = 1; for (uint32_t pos = 0; pos < Word_SeqLen1; pos++) { if (firstw1 == 1) { firstw1 = 0; if (rev == 0 && (b_reweight == 1 || outD1 == outF1)) // no computation since it is the first word, and there is no input for recurrent weight, or forget gate derivative is definitely zero (since s_t-1 = 0) continue; } else if (pos == boundary1 - 1) /// last word of the current sentence { if (!(boundary1 == Word_SeqLen1)) { boundary1 = Smp_Index1[++smpidx1]; firstw1 = 1; // next is the first word of the next sentence } if (rev == 1 && (b_reweight == 1 || outD1 == outF1)) continue; } if (b_reweight == 0) gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * wordLT[fea_dimension * Word_Index1[pos] + idy]; else gradient += outD1[output_dimension * pos + rev * hdim + relativeIdx] * h1[output_dimension * (rev == 1 ? 
(pos + 1) : (pos - 1)) + rev * hdim + idy]; } grad[startpos + hdim * idy + relativeIdx] = gradient; } } void cuda_LSTM_Weight_Deriv_Sup(uint32_t * Smp_Index1, uint32_t * Word_Index1, uint32_t Word_SeqLen1, float * wordLT, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, float * h1, uint32_t fea_dimension, uint32_t output_dimension, uint32_t b_reweight) { uint32_t hdim = output_dimension / 2; uint32_t input_dim = 0; if (b_reweight == 1) input_dim = hdim; else input_dim = fea_dimension; dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((4 * output_dimension + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (input_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_weight_deriv_sup<<<block_tail, thread_tail>>>(Smp_Index1, Word_Index1, Word_SeqLen1, wordLT, grad, outA1, outI1, outF1, outO1, h1, fea_dimension, output_dimension, b_reweight, hdim, hdim*hdim); } __global__ void cuda_lstm_bias_deriv(uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 4 * output_dimension) { float gradient = 0.0; uint32_t maxlen = Word_SeqLen1 > Word_SeqLen2 ? Word_SeqLen1 : Word_SeqLen2; if (Word_SeqLen3 > maxlen) maxlen = Word_SeqLen3; float * outD1, *outD2, *outD3; if (idx < output_dimension) { outD1 = outA1; outD2 = outA2; outD3 = outA3; } else if (idx < 2 * output_dimension) { outD1 = outI1; outD2 = outI2; outD3 = outI3; } else if (idx < 3 * output_dimension) { outD1 = outF1; outD2 = outF2; outD3 = outF3; } else { outD1 = outO1; outD2 = outO2; outD3 = outO3; } uint32_t ridx = idx % output_dimension; for (uint32_t pos = 0; pos < maxlen; pos++) { if (pos < Word_SeqLen1) gradient += outD1[output_dimension * pos + ridx]; if (pos < Word_SeqLen2) gradient += outD2[output_dimension * pos + ridx]; if (pos < Word_SeqLen3) gradient += outD3[output_dimension * pos + ridx]; } grad[idx] = gradient; } } void cuda_LSTM_Bias_Deriv(uint32_t Word_SeqLen1, uint32_t Word_SeqLen2, uint32_t Word_SeqLen3, float * grad, float * outA1, float * outA2, float * outA3, float * outI1, float * outI2, float * outI3, float * outF1, float * outF2, float * outF3, float * outO1, float * outO2, float * outO3, uint32_t output_dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (4 * output_dimension + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_bias_deriv<<<nBlockPerGrid, nThreadPerBlock>>>(Word_SeqLen1, Word_SeqLen2, Word_SeqLen3, grad, outA1, outA2, outA3, outI1, outI2, outI3, outF1, outF2, outF3, outO1, outO2, outO3, output_dimension); } __global__ void cuda_lstm_bias_deriv_sup(uint32_t Word_SeqLen1, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, uint32_t output_dimension) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < 4 * output_dimension) { float gradient = 0.0; float * outD1; if (idx < output_dimension) { outD1 = outA1; } else if (idx < 2 * output_dimension) { 
outD1 = outI1; } else if (idx < 3 * output_dimension) { outD1 = outF1; } else { outD1 = outO1; } uint32_t ridx = idx % output_dimension; for (uint32_t pos = 0; pos < Word_SeqLen1; pos++) { gradient += outD1[output_dimension * pos + ridx]; } grad[idx] = gradient; } } void cuda_LSTM_Bias_Deriv_Sup(uint32_t Word_SeqLen1, float * grad, float * outA1, float * outI1, float * outF1, float * outO1, uint32_t output_dimension) { uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; uint32_t nBlockPerGrid = (4 * output_dimension + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_bias_deriv_sup<<<nBlockPerGrid, nThreadPerBlock>>>(Word_SeqLen1, grad, outA1, outI1, outF1, outO1, output_dimension); } __global__ void cuda_lstm_compute_wvderiv(uint32_t Word_SeqLen, float * weight, float * grad, float * outA, float * outI, float * outF, float * outO, uint32_t fea_dim, uint32_t output_dim, uint32_t hdim, uint32_t blocksize) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; uint32_t idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx < fea_dim && idy < Word_SeqLen) { float gradient = 0.0; for (uint32_t di = 0; di < output_dim; di++) { if (di < hdim) { gradient += weight[idx * hdim + di] * outA[idy * output_dim + di]; gradient += weight[blocksize * 2 + idx * hdim + di] * outI[idy * output_dim + di]; gradient += weight[blocksize * 4 + idx * hdim + di] * outF[idy * output_dim + di]; gradient += weight[blocksize * 6 + idx * hdim + di] * outO[idy * output_dim + di]; } else { gradient += weight[blocksize + idx * hdim + (di - hdim)] * outA[idy * output_dim + di]; gradient += weight[blocksize * 3 + idx * hdim + (di - hdim)] * outI[idy * output_dim + di]; gradient += weight[blocksize * 5 + idx * hdim + (di - hdim)] * outF[idy * output_dim + di]; gradient += weight[blocksize * 7 + idx * hdim + (di - hdim)] * outO[idy * output_dim + di]; } } grad[idy * fea_dim + idx] = gradient; } } void cuda_LSTM_Compute_WVDeriv(uint32_t Word_SeqLen, float * weight, float * grad, float * outA, float * outI, float * outF, float * outO, uint32_t fea_dim, uint32_t output_dim) { dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); dim3 block_tail((fea_dim + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (Word_SeqLen + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); //uint32_t nThreadPerBlock = DEFAULT_THREAD_PER_BLOCK; //uint32_t nBlockPerGrid = ( m * n + DEFAULT_THREAD_PER_BLOCK - 1) / DEFAULT_THREAD_PER_BLOCK; cuda_lstm_compute_wvderiv<<<block_tail, thread_tail>>>(Word_SeqLen, weight, grad, outA, outI, outF, outO, fea_dim, output_dim, output_dim/2, (output_dim/2)*fea_dim); }
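// ---------------------------------------------------------------------------
// Hedged example (not part of the original library): a plain C++ host-side
// reference for the per-label cosine similarity computed by
// cuda_cosine_similarity_subspace above. It mirrors the kernel's indexing
// (a and b laid out as [batchsize x labelDim x subspaceDim], c as
// [batchsize x labelDim]) and adds eps to the denominator exactly as the
// kernel does, so it can be used on host copies of the buffers to spot-check
// GPU output. The function name is illustrative only.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdint>

void cpu_cosine_similarity_subspace_reference(const float * a, const float * b, float * c,
                                              uint32_t labelDim, uint32_t batchsize,
                                              uint32_t subspaceDim, float eps)
{
    for (uint32_t idx = 0; idx < batchsize; idx++)      // plays the role of the kernel's x index
    {
        for (uint32_t idy = 0; idy < labelDim; idy++)   // plays the role of the kernel's y index
        {
            float sumxx = 0.0f, sumyy = 0.0f, sumxy = 0.0f;
            uint32_t id_start = idx * (labelDim * subspaceDim) + idy * subspaceDim;
            for (uint32_t i = 0; i < subspaceDim; i++)
            {
                sumxx += a[id_start + i] * a[id_start + i];
                sumyy += b[id_start + i] * b[id_start + i];
                sumxy += a[id_start + i] * b[id_start + i];
            }
            c[idx * labelDim + idy] = sumxy / (sqrtf(sumxx * sumyy) + eps);
        }
    }
}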
2f760816a698cf6ba63f067a5851c8bb0dab91e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void PolynomialFunctionKernel_Double(float a3, float a2, float a1, float a0, double* input, double* output, int size) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; if (id < size) { double x = input[id]; output[id] = a3 * x * x * x + a2 * x * x + a1 * x + a0; } }
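// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): a minimal host-side
// driver showing one way to launch PolynomialFunctionKernel_Double with the
// HIP runtime. The element count, coefficients, and 256-thread block size are
// illustrative choices, not values from the original code. The kernel's
// flattened index reduces to the usual 1-D formula when a 1-D grid is used,
// which is what this sketch assumes.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

int run_polynomial_example()
{
    const int size = 1 << 20;                                  // example element count
    const float a3 = 1.0f, a2 = -2.0f, a1 = 0.5f, a0 = 3.0f;   // example coefficients

    std::vector<double> h_in(size), h_out(size);
    for (int i = 0; i < size; i++) h_in[i] = i * 1e-3;

    double *d_in = 0, *d_out = 0;
    hipMalloc((void **)&d_in,  size * sizeof(double));
    hipMalloc((void **)&d_out, size * sizeof(double));
    hipMemcpy(d_in, h_in.data(), size * sizeof(double), hipMemcpyHostToDevice);

    const int threads = 256;
    const int blocks  = (size + threads - 1) / threads;        // 1-D grid, so blockIdx.y == 0
    hipLaunchKernelGGL(PolynomialFunctionKernel_Double, dim3(blocks), dim3(threads), 0, 0,
                       a3, a2, a1, a0, d_in, d_out, size);

    hipMemcpy(h_out.data(), d_out, size * sizeof(double), hipMemcpyDeviceToHost);

    // spot-check one element against the closed-form polynomial on the host
    double x = h_in[123];
    double expected = a3 * x * x * x + a2 * x * x + a1 * x + a0;
    printf("out[123] = %f (expected %f)\n", h_out[123], expected);

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}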
2f760816a698cf6ba63f067a5851c8bb0dab91e2.cu
#include "includes.h" __global__ void PolynomialFunctionKernel_Double(float a3, float a2, float a1, float a0, double* input, double* output, int size) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; if (id < size) { double x = input[id]; output[id] = a3 * x * x * x + a2 * x * x + a1 * x + a0; } }
691468a07dadf3f2d26f679d8868f37b91b92201.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <chrono> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include "params.h" __device__ float sigmoid(const float x) { return 1.0f / (1.0f + expf(-x)); } __global__ void postprocess ( const float *__restrict__ cls_input, float *__restrict__ box_input, const float *__restrict__ dir_cls_input, const float *__restrict__ anchors, const float *__restrict__ anchor_bottom_heights, float *__restrict__ bndbox_output, int *__restrict__ object_counter, const float min_x_range, const float max_x_range, const float min_y_range, const float max_y_range, const int feature_x_size, const int feature_y_size, const int num_anchors, const int num_classes, const int num_box_values, const float score_thresh, const float dir_offset) { int loc_index = blockIdx.x; int ith_anchor = threadIdx.x; if (ith_anchor >= num_anchors) return; int col = loc_index % feature_x_size; int row = loc_index / feature_x_size; float x_offset = min_x_range + col * (max_x_range - min_x_range) / (feature_x_size - 1); float y_offset = min_y_range + row * (max_y_range - min_y_range) / (feature_y_size - 1); int cls_offset = loc_index * num_anchors * num_classes + ith_anchor * num_classes; float dev_cls[2] = {-1.f, 0.f}; const float *scores = cls_input + cls_offset; float max_score = sigmoid(scores[0]); int cls_id = 0; for (int i = 1; i < num_classes; i++) { float cls_score = sigmoid(scores[i]); if (cls_score > max_score) { max_score = cls_score; cls_id = i; } } dev_cls[0] = static_cast<float>(cls_id); dev_cls[1] = max_score; if (dev_cls[1] >= score_thresh) { const int box_offset = loc_index * num_anchors * num_box_values + ith_anchor * num_box_values; const int dir_cls_offset = loc_index * num_anchors * 2 + ith_anchor * 2; const float *anchor_ptr = anchors + ith_anchor * 4; const float z_offset = anchor_ptr[2] / 2 + anchor_bottom_heights[ith_anchor / 2]; const float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]}; float *box_encodings = box_input + box_offset; const float xa = anchor[0]; const float ya = anchor[1]; const float za = anchor[2]; const float dxa = anchor[3]; const float dya = anchor[4]; const float dza = anchor[5]; const float ra = anchor[6]; const float diagonal = sqrtf(dxa * dxa + dya * dya); box_encodings[0] = box_encodings[0] * diagonal + xa; box_encodings[1] = box_encodings[1] * diagonal + ya; box_encodings[2] = box_encodings[2] * dza + za; box_encodings[3] = expf(box_encodings[3]) * dxa; box_encodings[4] = expf(box_encodings[4]) * dya; box_encodings[5] = expf(box_encodings[5]) * dza; box_encodings[6] = box_encodings[6] + ra; const int dir_label = dir_cls_input[dir_cls_offset] > dir_cls_input[dir_cls_offset + 1] ? 
0 : 1; const float period = (float)M_PI; const float val = box_input[box_offset + 6] - dir_offset; const float dir_rot = val - floorf(val / (period + 1e-8f)) * period; const float yaw = dir_rot + dir_offset + period * dir_label; int resCount = (int)atomicAdd(object_counter, 1); bndbox_output[0] = resCount+1; float *data = bndbox_output + 1 + resCount * 9; data[0] = box_input[box_offset]; data[1] = box_input[box_offset + 1]; data[2] = box_input[box_offset + 2]; data[3] = box_input[box_offset + 3]; data[4] = box_input[box_offset + 4]; data[5] = box_input[box_offset + 5]; data[6] = yaw; data[7] = dev_cls[0]; data[8] = dev_cls[1]; } } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); Params p; // constant values defined in params.h const float min_x_range = p.min_x_range; const float max_x_range = p.max_x_range; const float min_y_range = p.min_y_range; const float max_y_range = p.max_y_range; const int feature_x_size = p.feature_x_size; const int feature_y_size = p.feature_y_size; const int num_anchors = p.num_anchors; const int num_classes = p.num_classes; const int num_box_values = p.num_box_values; const float score_thresh = p.score_thresh; const float dir_offset = p.dir_offset; const int len_per_anchor = p.len_per_anchor; const int num_dir_bins = p.num_dir_bins; const int feature_size = feature_x_size * feature_y_size; const int feature_anchor_size = feature_size * num_anchors; const int cls_size = feature_anchor_size * num_classes; const int box_size = feature_anchor_size * num_box_values; const int dir_cls_size = feature_anchor_size * num_dir_bins; const int bndbox_size = feature_anchor_size * 9 + 1; const int cls_size_byte = cls_size * sizeof(float); const int box_size_byte = box_size * sizeof(float); const int dir_cls_size_byte = dir_cls_size * sizeof(float); const int bndbox_size_byte = bndbox_size * sizeof(float); // input of the post-process kernel float *h_cls_input = (float*) malloc (cls_size_byte); float *h_box_input = (float*) malloc (box_size_byte); float *h_dir_cls_input = (float*) malloc (dir_cls_size_byte); // output of the post-process kernel float *h_bndbox_output = (float*) malloc (bndbox_size_byte); // random values srand(123); for (int i = 0; i < cls_size; i++) h_cls_input[i] = rand() / (float)RAND_MAX; for (int i = 0; i < box_size; i++) h_box_input[i] = rand() / (float)RAND_MAX; for (int i = 0; i < dir_cls_size; i++) h_dir_cls_input[i] = rand() / (float)RAND_MAX; float *d_cls_input, *d_box_input, *d_dir_cls_input, *d_bndbox_output; float *d_anchors, *d_anchor_bottom_heights; int *d_object_counter; hipMalloc((void **)&d_cls_input, cls_size_byte); hipMalloc((void **)&d_box_input, box_size_byte); hipMalloc((void **)&d_dir_cls_input, dir_cls_size_byte); hipMalloc((void **)&d_bndbox_output, bndbox_size_byte); hipMemcpy(d_cls_input, h_cls_input, cls_size_byte, hipMemcpyHostToDevice); hipMemcpy(d_dir_cls_input, h_dir_cls_input, dir_cls_size_byte, hipMemcpyHostToDevice); hipMalloc((void **)&d_anchors, num_anchors * len_per_anchor * sizeof(float)); hipMalloc((void **)&d_anchor_bottom_heights, num_classes * sizeof(float)); hipMalloc((void **)&d_object_counter, sizeof(int)); hipMemcpy(d_anchors, p.anchors, num_anchors * len_per_anchor * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_anchor_bottom_heights, p.anchor_bottom_heights, num_classes * sizeof(float), hipMemcpyHostToDevice); double time = 0.0; dim3 threads (num_anchors); dim3 blocks (feature_size); for (int i = 0; i < repeat; i++) 
{ hipMemcpy(d_box_input, h_box_input, box_size_byte, hipMemcpyHostToDevice); hipMemset(d_object_counter, 0, sizeof(int)); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( postprocess), dim3(blocks), dim3(threads), 0, 0, d_cls_input, d_box_input, d_dir_cls_input, d_anchors, d_anchor_bottom_heights, d_bndbox_output, d_object_counter, min_x_range, max_x_range, min_y_range, max_y_range, feature_x_size, feature_y_size, num_anchors, num_classes, num_box_values, score_thresh, dir_offset); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); } printf("Average execution time of postprocess kernel: %f (us)\n", (time * 1e-3f) / repeat); hipMemcpy(h_bndbox_output, d_bndbox_output, bndbox_size_byte, hipMemcpyDeviceToHost); double checksum = 0.0; for (int i = 0; i < bndbox_size; i++) checksum += h_bndbox_output[i]; printf("checksum = %lf\n", checksum / bndbox_size); hipFree(d_anchors); hipFree(d_anchor_bottom_heights); hipFree(d_object_counter); hipFree(d_cls_input); hipFree(d_box_input); hipFree(d_dir_cls_input); hipFree(d_bndbox_output); free(h_cls_input); free(h_box_input); free(h_dir_cls_input); free(h_bndbox_output); return 0; }
691468a07dadf3f2d26f679d8868f37b91b92201.cu
/* * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <chrono> #include <cstdio> #include <cstdlib> #include <cuda.h> #include "params.h" __device__ float sigmoid(const float x) { return 1.0f / (1.0f + expf(-x)); } __global__ void postprocess ( const float *__restrict__ cls_input, float *__restrict__ box_input, const float *__restrict__ dir_cls_input, const float *__restrict__ anchors, const float *__restrict__ anchor_bottom_heights, float *__restrict__ bndbox_output, int *__restrict__ object_counter, const float min_x_range, const float max_x_range, const float min_y_range, const float max_y_range, const int feature_x_size, const int feature_y_size, const int num_anchors, const int num_classes, const int num_box_values, const float score_thresh, const float dir_offset) { int loc_index = blockIdx.x; int ith_anchor = threadIdx.x; if (ith_anchor >= num_anchors) return; int col = loc_index % feature_x_size; int row = loc_index / feature_x_size; float x_offset = min_x_range + col * (max_x_range - min_x_range) / (feature_x_size - 1); float y_offset = min_y_range + row * (max_y_range - min_y_range) / (feature_y_size - 1); int cls_offset = loc_index * num_anchors * num_classes + ith_anchor * num_classes; float dev_cls[2] = {-1.f, 0.f}; const float *scores = cls_input + cls_offset; float max_score = sigmoid(scores[0]); int cls_id = 0; for (int i = 1; i < num_classes; i++) { float cls_score = sigmoid(scores[i]); if (cls_score > max_score) { max_score = cls_score; cls_id = i; } } dev_cls[0] = static_cast<float>(cls_id); dev_cls[1] = max_score; if (dev_cls[1] >= score_thresh) { const int box_offset = loc_index * num_anchors * num_box_values + ith_anchor * num_box_values; const int dir_cls_offset = loc_index * num_anchors * 2 + ith_anchor * 2; const float *anchor_ptr = anchors + ith_anchor * 4; const float z_offset = anchor_ptr[2] / 2 + anchor_bottom_heights[ith_anchor / 2]; const float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]}; float *box_encodings = box_input + box_offset; const float xa = anchor[0]; const float ya = anchor[1]; const float za = anchor[2]; const float dxa = anchor[3]; const float dya = anchor[4]; const float dza = anchor[5]; const float ra = anchor[6]; const float diagonal = sqrtf(dxa * dxa + dya * dya); box_encodings[0] = box_encodings[0] * diagonal + xa; box_encodings[1] = box_encodings[1] * diagonal + ya; box_encodings[2] = box_encodings[2] * dza + za; box_encodings[3] = expf(box_encodings[3]) * dxa; box_encodings[4] = expf(box_encodings[4]) * dya; box_encodings[5] = expf(box_encodings[5]) * dza; box_encodings[6] = box_encodings[6] + ra; const int dir_label = dir_cls_input[dir_cls_offset] > dir_cls_input[dir_cls_offset + 1] ? 
0 : 1; const float period = (float)M_PI; const float val = box_input[box_offset + 6] - dir_offset; const float dir_rot = val - floorf(val / (period + 1e-8f)) * period; const float yaw = dir_rot + dir_offset + period * dir_label; int resCount = (int)atomicAdd(object_counter, 1); bndbox_output[0] = resCount+1; float *data = bndbox_output + 1 + resCount * 9; data[0] = box_input[box_offset]; data[1] = box_input[box_offset + 1]; data[2] = box_input[box_offset + 2]; data[3] = box_input[box_offset + 3]; data[4] = box_input[box_offset + 4]; data[5] = box_input[box_offset + 5]; data[6] = yaw; data[7] = dev_cls[0]; data[8] = dev_cls[1]; } } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); Params p; // constant values defined in params.h const float min_x_range = p.min_x_range; const float max_x_range = p.max_x_range; const float min_y_range = p.min_y_range; const float max_y_range = p.max_y_range; const int feature_x_size = p.feature_x_size; const int feature_y_size = p.feature_y_size; const int num_anchors = p.num_anchors; const int num_classes = p.num_classes; const int num_box_values = p.num_box_values; const float score_thresh = p.score_thresh; const float dir_offset = p.dir_offset; const int len_per_anchor = p.len_per_anchor; const int num_dir_bins = p.num_dir_bins; const int feature_size = feature_x_size * feature_y_size; const int feature_anchor_size = feature_size * num_anchors; const int cls_size = feature_anchor_size * num_classes; const int box_size = feature_anchor_size * num_box_values; const int dir_cls_size = feature_anchor_size * num_dir_bins; const int bndbox_size = feature_anchor_size * 9 + 1; const int cls_size_byte = cls_size * sizeof(float); const int box_size_byte = box_size * sizeof(float); const int dir_cls_size_byte = dir_cls_size * sizeof(float); const int bndbox_size_byte = bndbox_size * sizeof(float); // input of the post-process kernel float *h_cls_input = (float*) malloc (cls_size_byte); float *h_box_input = (float*) malloc (box_size_byte); float *h_dir_cls_input = (float*) malloc (dir_cls_size_byte); // output of the post-process kernel float *h_bndbox_output = (float*) malloc (bndbox_size_byte); // random values srand(123); for (int i = 0; i < cls_size; i++) h_cls_input[i] = rand() / (float)RAND_MAX; for (int i = 0; i < box_size; i++) h_box_input[i] = rand() / (float)RAND_MAX; for (int i = 0; i < dir_cls_size; i++) h_dir_cls_input[i] = rand() / (float)RAND_MAX; float *d_cls_input, *d_box_input, *d_dir_cls_input, *d_bndbox_output; float *d_anchors, *d_anchor_bottom_heights; int *d_object_counter; cudaMalloc((void **)&d_cls_input, cls_size_byte); cudaMalloc((void **)&d_box_input, box_size_byte); cudaMalloc((void **)&d_dir_cls_input, dir_cls_size_byte); cudaMalloc((void **)&d_bndbox_output, bndbox_size_byte); cudaMemcpy(d_cls_input, h_cls_input, cls_size_byte, cudaMemcpyHostToDevice); cudaMemcpy(d_dir_cls_input, h_dir_cls_input, dir_cls_size_byte, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_anchors, num_anchors * len_per_anchor * sizeof(float)); cudaMalloc((void **)&d_anchor_bottom_heights, num_classes * sizeof(float)); cudaMalloc((void **)&d_object_counter, sizeof(int)); cudaMemcpy(d_anchors, p.anchors, num_anchors * len_per_anchor * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_anchor_bottom_heights, p.anchor_bottom_heights, num_classes * sizeof(float), cudaMemcpyHostToDevice); double time = 0.0; dim3 threads (num_anchors); dim3 blocks (feature_size); for (int i = 0; i 
< repeat; i++) { cudaMemcpy(d_box_input, h_box_input, box_size_byte, cudaMemcpyHostToDevice); cudaMemset(d_object_counter, 0, sizeof(int)); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); postprocess<<<blocks, threads>>> ( d_cls_input, d_box_input, d_dir_cls_input, d_anchors, d_anchor_bottom_heights, d_bndbox_output, d_object_counter, min_x_range, max_x_range, min_y_range, max_y_range, feature_x_size, feature_y_size, num_anchors, num_classes, num_box_values, score_thresh, dir_offset); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); } printf("Average execution time of postprocess kernel: %f (us)\n", (time * 1e-3f) / repeat); cudaMemcpy(h_bndbox_output, d_bndbox_output, bndbox_size_byte, cudaMemcpyDeviceToHost); double checksum = 0.0; for (int i = 0; i < bndbox_size; i++) checksum += h_bndbox_output[i]; printf("checksum = %lf\n", checksum / bndbox_size); cudaFree(d_anchors); cudaFree(d_anchor_bottom_heights); cudaFree(d_object_counter); cudaFree(d_cls_input); cudaFree(d_box_input); cudaFree(d_dir_cls_input); cudaFree(d_bndbox_output); free(h_cls_input); free(h_box_input); free(h_dir_cls_input); free(h_bndbox_output); return 0; }
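// In the postprocess kernel above, every thread that clears the score threshold
// stores its own resCount+1 into bndbox_output[0], so the value that survives
// depends on which store lands last rather than on the final counter. A sketch of
// one common alternative (an assumption, not the original code): skip that store
// inside the kernel and publish the atomic counter from the host after the launch.
int h_object_count = 0;
cudaMemcpy(&h_object_count, d_object_counter, sizeof(int), cudaMemcpyDeviceToHost);
float h_box_count = (float)h_object_count;
cudaMemcpy(d_bndbox_output, &h_box_count, sizeof(float), cudaMemcpyHostToDevice);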
b351e246aa0b08d379ab3abbea20ba333ce1a64c.hip
// !!! This is a file automatically generated by hipify!!! /* **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,3 * dimB: 5,1024,20,3 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,3 * dimB: 5,1024,20,3 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,64 * dimB: 5,1024,1,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,128 * dimB: 5,1024,1,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,192 * dimB: 5,1024,1,128 ; */ #include <stdio.h> #include <hip/hip_runtime_api.h> #include <npp.h> #include <hip/hip_vector_types.h> #define BLOCK_SIZE 1024 /// /// \param g_idata /// \param g_odata /// \param dim0 /// \param dim1 /// \param dim2 /// \param dim3A /// \param dim3B /// \param concatAxis /// \param EPT Elements Per Thread __global__ void kernel_concat_try01( const float * __restrict__ g_iA, const float * __restrict__ g_iB, float * __restrict__ g_o, const unsigned int dim0, const unsigned int dim1, const unsigned int dim2, const unsigned int dim3A, const unsigned int dim3B, const int concatAxis, const int EPT){ if(concatAxis==3) { const unsigned int dim3 = dim3A + dim3B; const unsigned long dim123 = dim1*dim2*dim3; const unsigned long dim23 = dim2*dim3; const unsigned long _limit = dim0*dim123; //const unsigned long lenA = dim0*dim1*dim2*dim3A; //const unsigned long lenB = dim0*dim1*dim2*dim3B; unsigned long tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = tid * EPT; unsigned long d0 = idx / dim123; unsigned long d1 = (idx % dim123) / dim23; unsigned long d2 = (idx % dim23 ) / dim3; unsigned long d3 = (idx % (dim3) ) / 1; unsigned long indxS; for (int i = 0; i < EPT && idx<_limit; i++) { if (d3 < dim3A) { indxS = d0*dim1*dim2*dim3A + d1*dim2*dim3A + d2*dim3A + (d3); //if (indxS >= lenA) printf("A*bid: %06d, thid: %06d, indxS: %06lu, d3: %06lu, idx: %06lu\n", blockIdx.x, threadIdx.x, indxS,d3, idx); g_o[idx] = g_iA[indxS]; } else { indxS = d0*dim1*dim2*dim3B + d1*dim2*dim3B + d2*dim3B + (d3-dim3A); //if (indxS >= lenB) printf("B*bid: %06d, thid: %06d, indxS: %06lu, d3: %06lu, idx: %06lu\n", blockIdx.x, threadIdx.x, indxS,d3, idx); g_o[idx] = g_iB[indxS]; } d3++; ///TODO: CHECK: What happens if d3 becomes greater than dim3? idx++; } } } void concat_try01( float* g_iA, float* g_iB, float* g_o, const unsigned int dim0A, const unsigned int dim1A, const unsigned int dim2A, const unsigned int dim3A, const unsigned int dim0B, const unsigned int dim1B, const unsigned int dim2B, const unsigned int dim3B, const unsigned int concatAxis) { if (concatAxis==3){ unsigned long block,grid, EPT , dim3 = dim3A + dim3B; EPT = 4; //Elements(of output tensor) Per Thread block = BLOCK_SIZE; grid = (dim0A*dim1A*dim2A*dim3 + block*EPT -1 )/(block*EPT); //printf("TensorA: %d,%d,%d,%d TensorB: %d,%d,%d,%d\n", dim0A,dim1A,dim2A,dim3A,dim0B,dim1B,dim2B,dim3B); //printf("BLOCKSIZE: %lu, GRID SIZE: %lu, EPT: %lu\n",block,grid,EPT); hipLaunchKernelGGL(( kernel_concat_try01) , dim3(grid), dim3(block) , 0, 0, g_iA,g_iB, g_o,dim0A,dim1A,dim2A,dim3A,dim3B,3,EPT); } else{ printf("concat_try01: ERROR-NOTIMPLEMENTED\n"); } }
b351e246aa0b08d379ab3abbea20ba333ce1a64c.cu
/* **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,3 * dimB: 5,1024,20,3 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,3 * dimB: 5,1024,20,3 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,20,64 * dimB: 5,1024,20,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,64 * dimB: 5,1024,1,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,128 * dimB: 5,1024,1,64 ; * **LA_Concat2: Rank: 4 concatDim: 3 dimA: 5,1024,1,192 * dimB: 5,1024,1,128 ; */ #include <stdio.h> #include <cuda_runtime_api.h> #include <npp.h> #include <vector_types.h> #define BLOCK_SIZE 1024 /// /// \param g_idata /// \param g_odata /// \param dim0 /// \param dim1 /// \param dim2 /// \param dim3A /// \param dim3B /// \param concatAxis /// \param EPT Elements Per Thread __global__ void kernel_concat_try01( const float * __restrict__ g_iA, const float * __restrict__ g_iB, float * __restrict__ g_o, const unsigned int dim0, const unsigned int dim1, const unsigned int dim2, const unsigned int dim3A, const unsigned int dim3B, const int concatAxis, const int EPT){ if(concatAxis==3) { const unsigned int dim3 = dim3A + dim3B; const unsigned long dim123 = dim1*dim2*dim3; const unsigned long dim23 = dim2*dim3; const unsigned long _limit = dim0*dim123; //const unsigned long lenA = dim0*dim1*dim2*dim3A; //const unsigned long lenB = dim0*dim1*dim2*dim3B; unsigned long tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = tid * EPT; unsigned long d0 = idx / dim123; unsigned long d1 = (idx % dim123) / dim23; unsigned long d2 = (idx % dim23 ) / dim3; unsigned long d3 = (idx % (dim3) ) / 1; unsigned long indxS; for (int i = 0; i < EPT && idx<_limit; i++) { if (d3 < dim3A) { indxS = d0*dim1*dim2*dim3A + d1*dim2*dim3A + d2*dim3A + (d3); //if (indxS >= lenA) printf("A*bid: %06d, thid: %06d, indxS: %06lu, d3: %06lu, idx: %06lu\n", blockIdx.x, threadIdx.x, indxS,d3, idx); g_o[idx] = g_iA[indxS]; } else { indxS = d0*dim1*dim2*dim3B + d1*dim2*dim3B + d2*dim3B + (d3-dim3A); //if (indxS >= lenB) printf("B*bid: %06d, thid: %06d, indxS: %06lu, d3: %06lu, idx: %06lu\n", blockIdx.x, threadIdx.x, indxS,d3, idx); g_o[idx] = g_iB[indxS]; } d3++; ///TODO: CHECK: What happens if d3 becomes greater than dim3? idx++; } } } void concat_try01( float* g_iA, float* g_iB, float* g_o, const unsigned int dim0A, const unsigned int dim1A, const unsigned int dim2A, const unsigned int dim3A, const unsigned int dim0B, const unsigned int dim1B, const unsigned int dim2B, const unsigned int dim3B, const unsigned int concatAxis) { if (concatAxis==3){ unsigned long block,grid, EPT , dim3 = dim3A + dim3B; EPT = 4; //Elements(of output tensor) Per Thread block = BLOCK_SIZE; grid = (dim0A*dim1A*dim2A*dim3 + block*EPT -1 )/(block*EPT); //printf("TensorA: %d,%d,%d,%d TensorB: %d,%d,%d,%d\n", dim0A,dim1A,dim2A,dim3A,dim0B,dim1B,dim2B,dim3B); //printf("BLOCKSIZE: %lu, GRID SIZE: %lu, EPT: %lu\n",block,grid,EPT); kernel_concat_try01 <<< grid, block >>> ( g_iA,g_iB, g_o,dim0A,dim1A,dim2A,dim3A,dim3B,3,EPT); } else{ printf("concat_try01: ERROR-NOTIMPLEMENTED\n"); } }
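// A minimal host-side usage sketch for concat_try01. The tensor shape is taken from
// the shape comments at the top of the file; the variable names and the omitted
// error checking are illustrative assumptions, not part of the original code.
const unsigned int d0 = 5, d1 = 1024, d2 = 20, d3A = 3, d3B = 3;
const size_t lenA = (size_t)d0 * d1 * d2 * d3A;
const size_t lenB = (size_t)d0 * d1 * d2 * d3B;
const size_t lenO = (size_t)d0 * d1 * d2 * (d3A + d3B);
float *dA, *dB, *dO;
cudaMalloc(&dA, lenA * sizeof(float));
cudaMalloc(&dB, lenB * sizeof(float));
cudaMalloc(&dO, lenO * sizeof(float));
// ... copy the two input tensors into dA and dB ...
concat_try01(dA, dB, dO, d0, d1, d2, d3A, d0, d1, d2, d3B, 3 /* concatAxis */);
cudaDeviceSynchronize();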
b18d19e7f7ce6a30849db21b343ffe7f054e87f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "YoloConfigs.h" #include "YoloLayer.h" using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread) { mClassCount = CLASS_NUM; mYoloKernel.clear(); mYoloKernel.push_back(yolo1); mYoloKernel.push_back(yolo2); mYoloKernel.push_back(yolo3); mKernelCount = mYoloKernel.size(); } YoloLayerPlugin::~YoloLayerPlugin() { if(mInputBuffer) CUDA_CHECK(hipHostFree(mInputBuffer)); if(mOutputBuffer) CUDA_CHECK(hipHostFree(mOutputBuffer)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(mYoloKernel.data(),d,kernelSize); d += kernelSize; assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(d,mYoloKernel.data(),kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size(); } int YoloLayerPlugin::initialize() { int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(hipHostMalloc(&mInputBuffer, totalCount * sizeof(float), hipHostMallocDefault)); totalCount = 0;//detection count for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(hipHostMalloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), hipHostMallocDefault)); return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { //output the result to channel int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float); return Dims3(totalCount + 1, 1, 1); } void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, hipStream_t stream) { auto Logist = [=](float data){ return 1./(1. 
+ exp(-data)); }; CUDA_CHECK(hipStreamSynchronize(stream)); int i = 0; float* inputData = (float *)mInputBuffer; for(const auto& yolo : mYoloKernel) { int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(hipMemcpyAsync(inputData, inputs[i], size * sizeof(float), hipMemcpyDeviceToHost, stream)); inputData += size; ++ i; } inputData = (float *)mInputBuffer; std::vector <Detection> result; for (const auto& yolo : mYoloKernel) { int stride = yolo.width*yolo.height; for (int j = 0;j < stride ;++j) { for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j; int objIndex = beginIdx + LOCATIONS*stride; //check obj float objProb = Logist(inputData[objIndex]); if(objProb <= IGNORE_THRESH) continue; Detection det; //det.objectness = objProb; int row = j / yolo.width; int cols = j % yolo.width; //Location det.bbox[0] = (cols + Logist(inputData[beginIdx]))/ yolo.width; det.bbox[1] = (row + Logist(inputData[beginIdx+stride]))/ yolo.height; det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k]; det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1]; //classes std::vector<float> classProb; classProb.resize(mClassCount); for (int c = 0;c<mClassCount;++c) { float cProb = Logist(inputData[beginIdx + (5+c)*stride ]) * objProb; classProb[c] = (cProb > IGNORE_THRESH) ? cProb : 0; } auto maxEle = std::max_element(&classProb[0], &classProb[mClassCount] + 1); det.classId = std::distance(&classProb[0],maxEle); det.prob = *maxEle; result.emplace_back(det); } } inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT; } int detCount =result.size(); auto data = (float *)mOutputBuffer; //copy count; data[0] = (float)detCount; //std::cout << "detCount"<< detCount << std::endl; data++; //copy result memcpy(data,result.data(),result.size()*sizeof(Detection)); std::cout << "copy result" << std::endl; //(count + det result) CUDA_CHECK(hipMemcpyAsync(outputs, mOutputBuffer, sizeof(float) + result.size()*sizeof(Detection), hipMemcpyHostToDevice, stream)); }; __device__ float Logist(float data){ return 1./(1. 
+ exp(-data)); }; __global__ void CalDetection(const float *input, float *output,int noElements, int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int stride = yoloWidth*yoloHeight; for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + classes)* stride *k + idx; int objIndex = beginIdx + LOCATIONS*stride; //check objectness float objProb = Logist(input[objIndex]); if(objProb <= IGNORE_THRESH) continue; int row = idx / yoloWidth; int cols = idx % yoloWidth; Detection det_tmp; //Location det_tmp.bbox[0] = (cols + Logist(input[beginIdx]))/ yoloWidth; det_tmp.bbox[1] = (row + Logist(input[beginIdx+stride]))/ yoloHeight; det_tmp.bbox[2] = exp(input[beginIdx+2*stride]) * anchors[2*k]; det_tmp.bbox[3] = exp(input[beginIdx+3*stride]) * anchors[2*k + 1]; det_tmp.classId = -1; det_tmp.prob = 0; //classes float max = IGNORE_THRESH; for (int c = 0;c<classes;++c){ float cProb = Logist(input[beginIdx + (5 + c) * stride]) * objProb; if(cProb > max){ max = cProb; det_tmp.classId = c; det_tmp.prob = max; } } if(det_tmp.classId >= 0) { int resCount = (int)atomicAdd(output,1); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); det->bbox[0] = det_tmp.bbox[0]; det->bbox[1] = det_tmp.bbox[1]; det->bbox[2] = det_tmp.bbox[2]; det->bbox[3] = det_tmp.bbox[3]; det->classId = det_tmp.classId; det->prob = det_tmp.prob; } } } void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,hipStream_t stream) { int numElem; void* devAnchor; size_t AnchorLen = sizeof(float)* CHECK_COUNT*2; CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen)); //first detect count init 0 CUDA_CHECK(hipMemset(output, 0, sizeof(float))); for (int i = 0;i< mYoloKernel.size();++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width*yolo.height; //copy anchor to device CUDA_CHECK(hipMemcpy(devAnchor,yolo.anchors,AnchorLen,hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount); } CUDA_CHECK(hipFree(devAnchor)); } int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream) { assert(batchSize == 1); //GPU forwardGpu((const float *const *)inputs,(float *)outputs[0],stream); //CPU //forwardCpu((const float *const *)inputs,(float *)outputs[0],stream); return 0; }; }
b18d19e7f7ce6a30849db21b343ffe7f054e87f0.cu
#include "YoloConfigs.h" #include "YoloLayer.h" using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread) { mClassCount = CLASS_NUM; mYoloKernel.clear(); mYoloKernel.push_back(yolo1); mYoloKernel.push_back(yolo2); mYoloKernel.push_back(yolo3); mKernelCount = mYoloKernel.size(); } YoloLayerPlugin::~YoloLayerPlugin() { if(mInputBuffer) CUDA_CHECK(cudaFreeHost(mInputBuffer)); if(mOutputBuffer) CUDA_CHECK(cudaFreeHost(mOutputBuffer)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(mYoloKernel.data(),d,kernelSize); d += kernelSize; assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(d,mYoloKernel.data(),kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size(); } int YoloLayerPlugin::initialize() { int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaHostAlloc(&mInputBuffer, totalCount * sizeof(float), cudaHostAllocDefault)); totalCount = 0;//detection count for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaHostAlloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), cudaHostAllocDefault)); return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { //output the result to channel int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float); return Dims3(totalCount + 1, 1, 1); } void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, cudaStream_t stream) { auto Logist = [=](float data){ return 1./(1. 
+ exp(-data)); }; CUDA_CHECK(cudaStreamSynchronize(stream)); int i = 0; float* inputData = (float *)mInputBuffer; for(const auto& yolo : mYoloKernel) { int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaMemcpyAsync(inputData, inputs[i], size * sizeof(float), cudaMemcpyDeviceToHost, stream)); inputData += size; ++ i; } inputData = (float *)mInputBuffer; std::vector <Detection> result; for (const auto& yolo : mYoloKernel) { int stride = yolo.width*yolo.height; for (int j = 0;j < stride ;++j) { for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j; int objIndex = beginIdx + LOCATIONS*stride; //check obj float objProb = Logist(inputData[objIndex]); if(objProb <= IGNORE_THRESH) continue; Detection det; //det.objectness = objProb; int row = j / yolo.width; int cols = j % yolo.width; //Location det.bbox[0] = (cols + Logist(inputData[beginIdx]))/ yolo.width; det.bbox[1] = (row + Logist(inputData[beginIdx+stride]))/ yolo.height; det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k]; det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1]; //classes std::vector<float> classProb; classProb.resize(mClassCount); for (int c = 0;c<mClassCount;++c) { float cProb = Logist(inputData[beginIdx + (5+c)*stride ]) * objProb; classProb[c] = (cProb > IGNORE_THRESH) ? cProb : 0; } auto maxEle = std::max_element(&classProb[0], &classProb[mClassCount] + 1); det.classId = std::distance(&classProb[0],maxEle); det.prob = *maxEle; result.emplace_back(det); } } inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT; } int detCount =result.size(); auto data = (float *)mOutputBuffer; //copy count; data[0] = (float)detCount; //std::cout << "detCount"<< detCount << std::endl; data++; //copy result memcpy(data,result.data(),result.size()*sizeof(Detection)); std::cout << "copy result" << std::endl; //(count + det result) CUDA_CHECK(cudaMemcpyAsync(outputs, mOutputBuffer, sizeof(float) + result.size()*sizeof(Detection), cudaMemcpyHostToDevice, stream)); }; __device__ float Logist(float data){ return 1./(1. 
+ exp(-data)); }; __global__ void CalDetection(const float *input, float *output,int noElements, int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int stride = yoloWidth*yoloHeight; for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + classes)* stride *k + idx; int objIndex = beginIdx + LOCATIONS*stride; //check objectness float objProb = Logist(input[objIndex]); if(objProb <= IGNORE_THRESH) continue; int row = idx / yoloWidth; int cols = idx % yoloWidth; Detection det_tmp; //Location det_tmp.bbox[0] = (cols + Logist(input[beginIdx]))/ yoloWidth; det_tmp.bbox[1] = (row + Logist(input[beginIdx+stride]))/ yoloHeight; det_tmp.bbox[2] = exp(input[beginIdx+2*stride]) * anchors[2*k]; det_tmp.bbox[3] = exp(input[beginIdx+3*stride]) * anchors[2*k + 1]; det_tmp.classId = -1; det_tmp.prob = 0; //classes float max = IGNORE_THRESH; for (int c = 0;c<classes;++c){ float cProb = Logist(input[beginIdx + (5 + c) * stride]) * objProb; if(cProb > max){ max = cProb; det_tmp.classId = c; det_tmp.prob = max; } } if(det_tmp.classId >= 0) { int resCount = (int)atomicAdd(output,1); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); det->bbox[0] = det_tmp.bbox[0]; det->bbox[1] = det_tmp.bbox[1]; det->bbox[2] = det_tmp.bbox[2]; det->bbox[3] = det_tmp.bbox[3]; det->classId = det_tmp.classId; det->prob = det_tmp.prob; } } } void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,cudaStream_t stream) { int numElem; void* devAnchor; size_t AnchorLen = sizeof(float)* CHECK_COUNT*2; CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen)); //first detect count init 0 CUDA_CHECK(cudaMemset(output, 0, sizeof(float))); for (int i = 0;i< mYoloKernel.size();++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width*yolo.height; //copy anchor to device CUDA_CHECK(cudaMemcpy(devAnchor,yolo.anchors,AnchorLen,cudaMemcpyHostToDevice)); CalDetection<<< (yolo.width*yolo.height + mThreadCount - 1) / mThreadCount, mThreadCount>>> (inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount); } CUDA_CHECK(cudaFree(devAnchor)); } int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) { assert(batchSize == 1); //GPU forwardGpu((const float *const *)inputs,(float *)outputs[0],stream); //CPU //forwardCpu((const float *const *)inputs,(float *)outputs[0],stream); return 0; }; }
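// CUDA_CHECK is used throughout this plugin but defined in a header that is not
// shown here; a minimal sketch of a typical definition (an assumption about the
// project's utility header, not the original macro) is:
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#ifndef CUDA_CHECK
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)
#endif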
c6894f101b3a58107235aeac02da89e7ed401fd7.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMasked.cu" #else THC_API void THCTensor_(maskedFill)(THCState* state, THCTensor *tensor, THCudaByteTensor *mask, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask)); THArgCheck(THCTensor_(nElement)(state, tensor) == THCudaByteTensor_nElement(state, mask), 2, "sizes do not match"); if (!THC_pointwiseApply2<real, uint8_t>(state, tensor, mask, TensorMaskedFillOp<real, unsigned char>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(maskedFillByte)(THCState* state, THCTensor *tensor, THByteTensor *mask, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedFill)(state, tensor, maskCuda, value); THCudaByteTensor_free(state, maskCuda); } THC_API void THCTensor_(maskedCopy)(THCState* state, THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask)); ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask); ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor); ptrdiff_t srcSize = THCTensor_(nElement)(state, src); // `mask` and `tensor` must have the same number of elements THArgCheck(maskSize == tensorSize, 2, "mask and tensor must have the same number of elements"); // Determine our output size ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask); // The number of `1` elements present in the mask must be <= the // number of elements available in `src` if (totalElements > srcSize) { THArgCheck(false, 2, "source nElements must be == mask `1` elements"); } // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed // iterator prefix sums? 
Convert `mask` to the same datatype as what // we're accumulating the prefix sum in (int64_t) to get around it THCudaLongTensor* maskLong = THCudaLongTensor_new(state); at::IntList maskSizes = mask->sizes(); THCudaLongTensor_resize(state, maskLong, maskSizes, {}); THCudaLongTensor_copyCudaByte(state, maskLong, mask); // Use a prefix sum to determine the output locations of the masked elements THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state); THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {}); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<int64_t> maskData(THCudaLongTensor_data(state, maskLong)); thrust::device_ptr<int64_t> maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum)); thrust::exclusive_scan( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif maskData, maskData + THCudaLongTensor_nElement(state, maskLong), maskPrefixSumData); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too THCTensor* contigSrc = THCTensor_(newContiguous)(state, src); // update `tensor` where `mask` == 1 but pull from `src` at // maskPrefixSum bool status = THC_pointwiseApply3<real, uint8_t, int64_t>( state, tensor, mask, maskPrefixSum, TensorMaskedCopyOp<real, unsigned char, int64_t>( THCTensor_(data)(state, contigSrc))); THCTensor_(free)(state, contigSrc); THCudaLongTensor_free(state, maskLong); THCudaLongTensor_free(state, maskPrefixSum); THArgCheck(status, 2, CUTORCH_DIM_WARNING); THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(maskedCopyByte)(THCState* state, THCTensor *tensor, THByteTensor *mask, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedCopy)(state, tensor, maskCuda, src); THCudaByteTensor_free(state, maskCuda); } THC_API void THCTensor_(maskedSelect)(THCState* state, THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask)); THArgCheck(THCudaByteTensor_nElement(state, mask) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); // Determine our output size ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask); THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor); THCTensor_(resize1d)(state, tensorContig, totalElements); if (tensor != tensorContig) { THCTensor_(resize1d)(state, tensor, totalElements); } // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed // iterator prefix sums? 
Convert `mask` to the same datatype as what // we're accumulating the prefix sum in (int64_t) to get around it THCudaLongTensor* maskLong = THCudaLongTensor_new(state); at::IntList maskSizes = mask->sizes(); THCudaLongTensor_resize(state, maskLong, maskSizes, {}); THCudaLongTensor_copyCudaByte(state, maskLong, mask); // Use a prefix sum to determine the output locations of the masked elements THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state); THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {}); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<int64_t> maskData(THCudaLongTensor_data(state, maskLong)); thrust::device_ptr<int64_t> maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum)); thrust::exclusive_scan( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif maskData, maskData + THCudaLongTensor_nElement(state, maskLong), maskPrefixSumData); // Then copy over the masked elements at their desired output index bool status = THC_pointwiseApply3<uint8_t, int64_t, real>( state, mask, maskPrefixSum, src, TensorMaskedSelectOp<real, unsigned char, int64_t>( THCTensor_(data)(state, tensor))); THCudaLongTensor_free(state, maskLong); THCudaLongTensor_free(state, maskPrefixSum); if (tensor != tensorContig) { THCTensor_(freeCopyTo)(state, tensorContig, tensor); } else { THCTensor_(free)(state, tensorContig); } THArgCheck(status, 2, CUTORCH_DIM_WARNING); THCudaCheck(hipGetLastError()); } // FIXME: remove now that we have THCudaByteTensor? THC_API void THCTensor_(maskedSelectByte)(THCState* state, THCTensor *tensor, THCTensor *src, THByteTensor *mask) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedSelect)(state, tensor, src, maskCuda); THCudaByteTensor_free(state, maskCuda); } #endif
c6894f101b3a58107235aeac02da89e7ed401fd7.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMasked.cu" #else THC_API void THCTensor_(maskedFill)(THCState* state, THCTensor *tensor, THCudaByteTensor *mask, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask)); THArgCheck(THCTensor_(nElement)(state, tensor) == THCudaByteTensor_nElement(state, mask), 2, "sizes do not match"); if (!THC_pointwiseApply2<real, uint8_t>(state, tensor, mask, TensorMaskedFillOp<real, unsigned char>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(maskedFillByte)(THCState* state, THCTensor *tensor, THByteTensor *mask, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedFill)(state, tensor, maskCuda, value); THCudaByteTensor_free(state, maskCuda); } THC_API void THCTensor_(maskedCopy)(THCState* state, THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask)); ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask); ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor); ptrdiff_t srcSize = THCTensor_(nElement)(state, src); // `mask` and `tensor` must have the same number of elements THArgCheck(maskSize == tensorSize, 2, "mask and tensor must have the same number of elements"); // Determine our output size ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask); // The number of `1` elements present in the mask must be <= the // number of elements available in `src` if (totalElements > srcSize) { THArgCheck(false, 2, "source nElements must be == mask `1` elements"); } // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed // iterator prefix sums? 
Convert `mask` to the same datatype as what // we're accumulating the prefix sum in (int64_t) to get around it THCudaLongTensor* maskLong = THCudaLongTensor_new(state); at::IntList maskSizes = mask->sizes(); THCudaLongTensor_resize(state, maskLong, maskSizes, {}); THCudaLongTensor_copyCudaByte(state, maskLong, mask); // Use a prefix sum to determine the output locations of the masked elements THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state); THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {}); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<int64_t> maskData(THCudaLongTensor_data(state, maskLong)); thrust::device_ptr<int64_t> maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum)); thrust::exclusive_scan( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif maskData, maskData + THCudaLongTensor_nElement(state, maskLong), maskPrefixSumData); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too THCTensor* contigSrc = THCTensor_(newContiguous)(state, src); // update `tensor` where `mask` == 1 but pull from `src` at // maskPrefixSum bool status = THC_pointwiseApply3<real, uint8_t, int64_t>( state, tensor, mask, maskPrefixSum, TensorMaskedCopyOp<real, unsigned char, int64_t>( THCTensor_(data)(state, contigSrc))); THCTensor_(free)(state, contigSrc); THCudaLongTensor_free(state, maskLong); THCudaLongTensor_free(state, maskPrefixSum); THArgCheck(status, 2, CUTORCH_DIM_WARNING); THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(maskedCopyByte)(THCState* state, THCTensor *tensor, THByteTensor *mask, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedCopy)(state, tensor, maskCuda, src); THCudaByteTensor_free(state, maskCuda); } THC_API void THCTensor_(maskedSelect)(THCState* state, THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask)); THArgCheck(THCudaByteTensor_nElement(state, mask) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); // Determine our output size ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask); THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor); THCTensor_(resize1d)(state, tensorContig, totalElements); if (tensor != tensorContig) { THCTensor_(resize1d)(state, tensor, totalElements); } // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed // iterator prefix sums? 
Convert `mask` to the same datatype as what // we're accumulating the prefix sum in (int64_t) to get around it THCudaLongTensor* maskLong = THCudaLongTensor_new(state); at::IntList maskSizes = mask->sizes(); THCudaLongTensor_resize(state, maskLong, maskSizes, {}); THCudaLongTensor_copyCudaByte(state, maskLong, mask); // Use a prefix sum to determine the output locations of the masked elements THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state); THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {}); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<int64_t> maskData(THCudaLongTensor_data(state, maskLong)); thrust::device_ptr<int64_t> maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum)); thrust::exclusive_scan( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif maskData, maskData + THCudaLongTensor_nElement(state, maskLong), maskPrefixSumData); // Then copy over the masked elements at their desired output index bool status = THC_pointwiseApply3<uint8_t, int64_t, real>( state, mask, maskPrefixSum, src, TensorMaskedSelectOp<real, unsigned char, int64_t>( THCTensor_(data)(state, tensor))); THCudaLongTensor_free(state, maskLong); THCudaLongTensor_free(state, maskPrefixSum); if (tensor != tensorContig) { THCTensor_(freeCopyTo)(state, tensorContig, tensor); } else { THCTensor_(free)(state, tensorContig); } THArgCheck(status, 2, CUTORCH_DIM_WARNING); THCudaCheck(cudaGetLastError()); } // FIXME: remove now that we have THCudaByteTensor? THC_API void THCTensor_(maskedSelectByte)(THCState* state, THCTensor *tensor, THCTensor *src, THByteTensor *mask) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); THCudaByteTensor_copyByte(state, maskCuda, mask); THCTensor_(maskedSelect)(state, tensor, src, maskCuda); THCudaByteTensor_free(state, maskCuda); } #endif
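// maskedCopy and maskedSelect above both rely on an exclusive prefix sum over the
// byte mask to compute, for every mask==1 element, its slot in the packed output.
// A self-contained sketch of that idea with plain Thrust (hypothetical data, not
// tied to the THC tensor types):
#include <thrust/device_vector.h>
#include <thrust/scan.h>

void mask_to_output_indices()
{
    const int64_t h_mask[6] = {0, 1, 1, 0, 1, 0};
    thrust::device_vector<int64_t> mask(h_mask, h_mask + 6);
    thrust::device_vector<int64_t> prefix(6);
    // prefix[i] = number of selected elements before position i, i.e. the output
    // index of element i whenever mask[i] == 1
    thrust::exclusive_scan(mask.begin(), mask.end(), prefix.begin());
    // prefix now holds {0, 0, 1, 2, 2, 3}; the total selected count is
    // prefix.back() + mask.back() = 3
}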
437fafc3a81e9d8fcac1a72fa878a1c216c2e2b9.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "zone_map.h" void process_error(int severity, string err); // this should probably live in a utils header file bool fh_equal_to(const float_type x, const float_type y) { return (((x-y) < EPSILON) && ((x-y) > -EPSILON)); } bool fh_less(const float_type x, const float_type y) { return ((y-x) > EPSILON); } bool fh_greater(const float_type x, const float_type y) { return ((x-y) > EPSILON); } bool fh_greater_equal_to(const float_type x, const float_type y) { return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON))); } bool fh_less_equal_to(const float_type x, const float_type y) { return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON))); } char host_logical_and(char column1, char column2) { //cout << "AND " << column1 << " " << column2 << endl; if (column1 == 'A' && column2 == 'A') return 'A'; else if (column1 == 'N' || column2 == 'N') { return 'N'; } else return 'R'; } char host_logical_or(char column1, char column2) { //cout << "OR " << column1 << " " << column2 << endl; if (column1 == 'A' && column2 == 'A') return 'A'; else if (column1 == 'N' && column2 == 'N') return 'N'; else return 'R'; } char host_compare(int_type s, int_type d, int_type op_type) { char res = 'N'; if (op_type == 2 && d>s ) // > res = 'A'; else if (op_type == 1 && d<s) // < res = 'A'; else if (op_type == 6 && d>=s) // >= res = 'A'; else if (op_type == 5 && d<=s) // <= res = 'A'; else if (op_type == 4 && d==s)// = res = 'A'; else // != if(d!=s) res = 'A'; return res; } char host_compare(float_type s, float_type d, int_type op_type) { char res = 'N'; if (op_type == 2 && (d-s) > EPSILON) // > res = 'A'; else if (op_type == 1 && (s-d) > EPSILON) // < res = 'A'; else if (op_type == 6 && ((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // >= res = 'A'; else if (op_type == 5 && ((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // <= res = 'A'; else if (op_type == 4 && ((d-s) < EPSILON) && ((d-s) > -EPSILON))// = res = 'A'; else // != if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 'A'; return res; } char host_compare(int_type* column1, int_type d, int_type op_type) { char res = 'R'; //cout << "CMP " << column1[0] << " " << column1[1] << " " << d << " " << op_type << endl; if (op_type == 2) { // > if (column1[1] <= d) res = 'N'; else if (column1[0] > d) res = 'A'; } else if (op_type == 1) { // < if (column1[0] >= d) res = 'N'; else if (column1[1] < d) res = 'A'; } else if (op_type == 6) { // >= if (column1[1] < d) res = 'N'; else if (column1[0] >= d) res = 'A'; } else if (op_type == 5) { // <= if (column1[0] > d) res = 'N'; else if (column1[1] <= d) res = 'A'; } else if (op_type == 4 && column1[0] == d && column1[1] == d) { // = res = 'A'; }; //cout << "res " << res << endl; return res; } char host_compare(float_type* column1, float_type d, int_type op_type) { char res = 'R'; //cout << "CMP " << column1[0] << " " << column1[1] << " with " << d << endl; if (op_type 
== 2) { // > if(fh_less_equal_to(column1[1],d)) { res = 'N'; } else if(fh_greater(column1[0],d)) { res = 'A'; }; } else if (op_type == 1) { // < if(fh_less(column1[1],d)) { res = 'A'; } else if(fh_greater_equal_to(column1[0],d)) { res = 'N'; }; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[0],d)) { res = 'A'; } else if(fh_less(column1[1],d)) { res = 'N'; }; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],d)) { res = 'A'; } else if(fh_greater(column1[0],d)) { res = 'N'; }; } else if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // = res = 'A'; //cout << "res " << res << endl; return res; } char host_compare(int_type* column1, int_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(column1[0] > column2[1]) res = 'A'; else if(column1[1] <= column2[0]) res = 'N'; } else if (op_type == 1) { // < if(column1[1] < column2[0]) res = 'A'; else if(column1[0] >= column2[1]) res = 'N'; } else if (op_type == 6) { // >= if(column1[0] >= column2[1]) res = 'A'; else if(column1[1] < column2[0]) res = 'N'; } else if (op_type == 5) { // <= if(column1[1] <= column2[0]) res = 'A'; else if(column1[0] > column2[1]) res = 'N'; } else if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // = res = 'A'; return res; } char host_compare(float_type* column1, float_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(fh_greater(column1[0],column2[1])) res = 'A'; else if(fh_less_equal_to(column1[1],column2[0])) res = 'N'; } else if (op_type == 1) { // < if(fh_less(column1[1],column2[0])) res = 'A'; else if(fh_greater_equal_to(column1[0],column2[1])) res = 'N'; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[1],column2[0])) res = 'A'; else if(fh_less(column1[1],column2[0])) res = 'N'; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],column2[0])) res = 'A'; else if(fh_greater(column1[0],column2[1])) res = 'N'; } else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // = res = 'A'; return res; } char host_compare(float_type* column1, int_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(fh_greater(column1[0],(float_type)column2[1])) res = 'A'; else if(fh_less_equal_to(column1[1],(float_type)column2[0])) res = 'N'; } else if (op_type == 1) { // < if(fh_less(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_greater_equal_to(column1[0],(float_type)column2[1])) res = 'N'; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_less(column1[1],(float_type)column2[0])) res = 'N'; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_greater(column1[0],(float_type)column2[1])) res = 'N'; } else if (op_type == 4 && fh_equal_to(column1[0],(float_type) column2[1]) && fh_equal_to(column1[1],(float_type)column2[0])) // = res = 'A'; return res; } float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); temp[0] = (float_type)column1[0]; temp[1] = (float_type)column1[1]; if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = temp[0] * column2[0]; temp[1] = temp[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = temp[0] + column2[0]; temp[1] = temp[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - temp[0]; temp[1] = column2[1] - 
temp[1]; } else { temp[0] = column2[0] / temp[0]; temp[1] = column2[1] / temp[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = temp[0] * column2[0]; temp[1] = temp[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = temp[0] + column2[0]; temp[1] = temp[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = temp[0] - column2[0]; temp[1] = temp[1] - column2[1]; } else { temp[0] = temp[0] / column2[0]; temp[1] = temp[1] / column2[1]; } }; return temp; } int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse) { int_type* temp = (int_type*)malloc(2*int_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - column2[0]; temp[1] = column1[1] - column2[1]; } else { temp[0] = column1[0] / column2[0]; temp[1] = column1[1] / column2[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - column1[0]; temp[1] = column2[1] - column1[1]; } else { temp[0] = column2[0] / column1[0]; temp[1] = column2[1] / column1[1]; } } return temp; } float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - column2[0]; temp[1] = column1[1] - column2[1]; } else { temp[0] = column1[0] / column2[0]; temp[1] = column1[1] / column2[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - column1[0]; temp[1] = column2[1] - column1[1]; } else { temp[0] = column2[0] / column1[0]; temp[1] = column2[1] / column1[1]; } } return temp; } int_type* host_op(int_type* column1, int_type d, string op_type, int reverse) { int_type* temp = (int_type*)malloc(2*int_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - d; temp[1] = column1[1] - d; } else { temp[0] = column1[0] / d; temp[1] = column1[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = d - column1[0]; temp[1] = d - column1[1]; } else { temp[0] = d / column1[0]; temp[1] = d / column1[1]; } }; return temp; } float_type* host_op(int_type* column1, float_type d, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); temp[0] = (float_type)column1[0]; temp[1] = 
(float_type)column1[1]; float_type* temp1 = (float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp1[0] = temp[0] * d; temp1[1] = temp[1] * d; } else if (op_type.compare("ADD") == 0) { temp1[0] = temp[0] + d; temp1[1] = temp[1] + d; } else if (op_type.compare("MINUS") == 0) { temp1[0] = temp[0] - d; temp1[1] = temp[1] - d; } else { temp1[0] = temp[0] / d; temp1[1] = temp[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp1[0] = temp[0] * d; temp1[1] = temp[1] * d; } else if (op_type.compare("ADD") == 0) { temp1[0] = temp[0] + d; temp1[1] = temp[1] + d; } else if (op_type.compare("MINUS") == 0) { temp1[0] = d - temp[0]; temp1[1] = d - temp[1]; } else { temp1[0] = d / temp[0]; temp1[1] = d / temp[1]; } }; free(temp); return temp1; } float_type* host_op(float_type* column1, float_type d, string op_type,int reverse) { float_type* temp = (float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - d; temp[1] = column1[1] - d; } else { temp[0] = column1[0] / d; temp[1] = column1[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = d - column1[0]; temp[1] = d - column1[1]; } else { temp[0] = d / column1[0]; temp[1] = d / column1[1]; } }; return temp; } unsigned int precision_func(unsigned int& p1, unsigned int& p2, string op) { if (op.compare("DIV") != 0 ) { unsigned int res; if (op.compare("MUL") != 0 ) { if(p1 > p2) { res = p1; p2 = p1-p2; p1 = 0; } else { res = p1; p1 = p2-p1; p2 = 0; }; return res; } else { //std::swap(p1,p2); res = p1+p2; p1 = 0; p2 = 0; return res; }; } else { if(p1 == p2) { p1 = p1+4; p2 = 0; return p1; } else { if(p1 > p2) { p1 = p1 + (p1-p2) + 4; p2 = 0; return p1; } else { p2 = p2 + (p2-p1) + 4; p1 = 0; return p2; } } }; } //CudaSet a contains two records - with all minimum and maximum values of the segment //We need to determine if this segment needs to be processed //The check takes place in host's memory char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, unsigned int segment) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<char> bool_vectors; stack<unsigned int> exe_precision; string s1, s2, s1_val, s2_val; int_type n1, n2, res; if(a->not_compressed) return 'R'; //first we need to set all host arrays [0] and [1] of t to min and max values of appropriate files set<string> uniques; queue<string> fields(op_value); CudaSet *t; FILE* f; unsigned int cnt; string f1; while(!fields.empty()) { if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) { if(a->filtered) t = varNames[a->source_name]; else t = a; // copy t min and max values to a only if int, decimal or float if(t->type[fields.front()] <= 1) { f1 = t->load_file_name + "." + fields.front() + "." 
+ to_string(segment); f = fopen (f1.c_str() , "rb" ); if(!f) { cout << "Error opening " << f1 << " file " << endl; exit(0); }; fread((char *)&cnt, 4, 1, f); if (t->type[fields.front()] == 0) { a->h_columns_int[fields.front()].resize(2); fread((char *)&a->h_columns_int[fields.front()][0], 8, 1, f); fread((char *)&a->h_columns_int[fields.front()][1], 8, 1, f); fseek(f, 8+cnt, SEEK_CUR); fread((char *)&a->mRecCount, 4, 1, f); //cout << endl << "ZONE " << a->mRecCount << endl; fread((char *)&cnt, 4, 1, f); //cout << "file " << f1 << " " << segment << " " << a->h_columns_int[fields.front()][0] << ":" << a->h_columns_int[fields.front()][1] << endl; } else { long long int t; a->h_columns_float[fields.front()].resize(2); fread((char *)&t, 8, 1, f); a->h_columns_float[fields.front()][0] = (float_type)t/100.0; fread((char *)&t, 8, 1, f); a->h_columns_float[fields.front()][1] = (float_type)t/100.0; //cout << "file " << f1 << " " << segment << " " << a->h_columns_float[a->type_index[colIndex]][0] << ":" << a->h_columns_float[a->type_index[colIndex]][1] << endl; }; fclose(f); }; }; uniques.insert(fields.front()); fields.pop(); }; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); //cout << ss << endl; if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); exe_precision.push(op_nums_precision.front()); op_nums_precision.pop(); } else if (ss.compare("NAME") == 0) { if(var_exists(a, op_value.front())) { exe_value.push(op_value.front()); op_value.pop(); } else { process_error(1, "Couldn't find column " + op_value.front()); //cout << "Couldn't find column " << op_value.front() << endl; //exit(0); }; } else if (ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) n1 = n1*(unsigned int)pow(10,p1); if(p2) n2 = n2*(unsigned int)pow(10,p2); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; exe_type.push("NUMBER"); exe_nums.push(res); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s1_val, exe_vectors); exe_type.push("NAME"); exe_value.push(""); exe_precision.push(0); } else if (s2.compare("NAME") == 0 && s1.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s2_val, exe_vectors); //cout << "name " << s2_val << endl; exe_type.push("NAME"); exe_value.push(""); exe_precision.push(0); } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s1_val, 
exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,p2); }; exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(host_op(t,n1,ss,1)); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,p2); }; exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(host_op(t,n1,ss,0)); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p1 = get_decimals(a, s1_val, exe_precision); auto p2 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); int_type* s3 = get_host_vec(a, s2_val, exe_vectors);; exe_type.push("NAME"); exe_value.push(""); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { s3[0] = s3[0]*(unsigned int)pow(10,p2); s3[1] = s3[1]*(unsigned int)pow(10,p2); }; exe_vectors.push(host_op(t,s3,ss,1)); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) == a->columnNames.end()) delete [] t; if(std::find(a->columnNames.begin(), a->columnNames.end(), s2_val) == a->columnNames.end()) delete [] s3; } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto pres = ::max(p1, p2); exe_precision.push(pres); exe_type.push("NAME"); exe_value.push(""); if(p1) n1 = n1*(unsigned int)pow(10,pres-p1); if(p2) n2 = n2*(unsigned int)pow(10,pres-p2); bool_vectors.push(host_compare(n1,n2,cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { time_t tt; s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pos = s1_val.find("date()"); bool_vectors.push('R'); exe_type.push("NAME"); exe_value.push(""); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s2_val = exe_value.top(); exe_value.pop(); s1_val = exe_value.top(); exe_value.pop(); int_type val; time_t tt; int_type* t = get_host_vec(a, s1_val, exe_vectors); bool_vectors.push('R'); exe_type.push("NAME"); exe_value.push(""); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s1_val, exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); auto pres = ::max(p1, p2); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,pres-p1); t[1] = t[1]*(unsigned int)pow(10,pres-p1); }; if(p2) 
{ n1 = n1*(unsigned int)pow(10,pres-p2); }; exe_type.push("NAME"); exe_value.push(""); bool_vectors.push(host_compare(t,n1,cmp_type)); } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pres = ::max(p1, p2); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,pres-p1); t[1] = t[1]*(unsigned int)pow(10,pres-p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,pres-p2); }; exe_type.push("NAME"); exe_value.push(""); bool_vectors.push(host_compare(t,n1,cmp_type)); } else { return 'R'; } } else if (ss.compare("AND") == 0) { char s3 = bool_vectors.top(); bool_vectors.pop(); char s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("NAME"); bool_vectors.push(host_logical_and(s2,s3)); } else if (ss.compare("OR") == 0) { char s3 = bool_vectors.top(); bool_vectors.pop(); char s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("NAME"); bool_vectors.push(host_logical_or(s2,s3)); } else { if(ss.compare("JOIN") == 0) process_error(2, "operation = is not valid"); //cout << "operation = is not valid" << endl; else process_error(2, "operation " + string(ss)+ " is not valid"); //cout << "operation " << ss << " is not valid" << endl; exit(0); // never gets here } }; }; return bool_vectors.top(); }
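The zone_map_check logic above never reads row data: each column referenced by the predicate is reduced to its segment minimum in element [0] and maximum in element [1], and every comparison collapses to one of three answers, 'A' (all rows in the segment satisfy the predicate), 'N' (none do) or 'R' (the range straddles the constant, so the segment has to be scanned row by row). A minimal stand-alone sketch of that three-way test for a single integer column and a ">" predicate follows; the helper name and the main() driver are illustrative only, not part of the original source.

#include <cstdio>

// Sketch of min/max zone-map pruning for the predicate "col > d".
// seg_min/seg_max play the role of column1[0]/column1[1] in host_compare above.
char zone_check_gt(long long seg_min, long long seg_max, long long d) {
    if (seg_max <= d) return 'N';   // no row in the segment can exceed d
    if (seg_min >  d) return 'A';   // every row in the segment exceeds d
    return 'R';                     // range straddles d: the segment must be scanned
}

int main() {
    // A segment whose values lie in [10, 99]:
    printf("%c %c %c\n",
           zone_check_gt(10, 99, 100),   // 'N' - skip the segment entirely
           zone_check_gt(10, 99, 5),     // 'A' - accept it without scanning
           zone_check_gt(10, 99, 50));   // 'R' - fall back to a row-level scan
    return 0;
}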
437fafc3a81e9d8fcac1a72fa878a1c216c2e2b9.cu
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "zone_map.h" void process_error(int severity, string err); // this should probably live in a utils header file bool fh_equal_to(const float_type x, const float_type y) { return (((x-y) < EPSILON) && ((x-y) > -EPSILON)); } bool fh_less(const float_type x, const float_type y) { return ((y-x) > EPSILON); } bool fh_greater(const float_type x, const float_type y) { return ((x-y) > EPSILON); } bool fh_greater_equal_to(const float_type x, const float_type y) { return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON))); } bool fh_less_equal_to(const float_type x, const float_type y) { return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON))); } char host_logical_and(char column1, char column2) { //cout << "AND " << column1 << " " << column2 << endl; if (column1 == 'A' && column2 == 'A') return 'A'; else if (column1 == 'N' || column2 == 'N') { return 'N'; } else return 'R'; } char host_logical_or(char column1, char column2) { //cout << "OR " << column1 << " " << column2 << endl; if (column1 == 'A' && column2 == 'A') return 'A'; else if (column1 == 'N' && column2 == 'N') return 'N'; else return 'R'; } char host_compare(int_type s, int_type d, int_type op_type) { char res = 'N'; if (op_type == 2 && d>s ) // > res = 'A'; else if (op_type == 1 && d<s) // < res = 'A'; else if (op_type == 6 && d>=s) // >= res = 'A'; else if (op_type == 5 && d<=s) // <= res = 'A'; else if (op_type == 4 && d==s)// = res = 'A'; else // != if(d!=s) res = 'A'; return res; } char host_compare(float_type s, float_type d, int_type op_type) { char res = 'N'; if (op_type == 2 && (d-s) > EPSILON) // > res = 'A'; else if (op_type == 1 && (s-d) > EPSILON) // < res = 'A'; else if (op_type == 6 && ((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // >= res = 'A'; else if (op_type == 5 && ((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) // <= res = 'A'; else if (op_type == 4 && ((d-s) < EPSILON) && ((d-s) > -EPSILON))// = res = 'A'; else // != if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 'A'; return res; } char host_compare(int_type* column1, int_type d, int_type op_type) { char res = 'R'; //cout << "CMP " << column1[0] << " " << column1[1] << " " << d << " " << op_type << endl; if (op_type == 2) { // > if (column1[1] <= d) res = 'N'; else if (column1[0] > d) res = 'A'; } else if (op_type == 1) { // < if (column1[0] >= d) res = 'N'; else if (column1[1] < d) res = 'A'; } else if (op_type == 6) { // >= if (column1[1] < d) res = 'N'; else if (column1[0] >= d) res = 'A'; } else if (op_type == 5) { // <= if (column1[0] > d) res = 'N'; else if (column1[1] <= d) res = 'A'; } else if (op_type == 4 && column1[0] == d && column1[1] == d) { // = res = 'A'; }; //cout << "res " << res << endl; return res; } char host_compare(float_type* column1, float_type d, int_type op_type) { char res = 'R'; //cout << "CMP " << column1[0] << " " << column1[1] << " with " << d << endl; if (op_type == 2) { // > if(fh_less_equal_to(column1[1],d)) { res = 
'N'; } else if(fh_greater(column1[0],d)) { res = 'A'; }; } else if (op_type == 1) { // < if(fh_less(column1[1],d)) { res = 'A'; } else if(fh_greater_equal_to(column1[0],d)) { res = 'N'; }; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[0],d)) { res = 'A'; } else if(fh_less(column1[1],d)) { res = 'N'; }; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],d)) { res = 'A'; } else if(fh_greater(column1[0],d)) { res = 'N'; }; } else if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // = res = 'A'; //cout << "res " << res << endl; return res; } char host_compare(int_type* column1, int_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(column1[0] > column2[1]) res = 'A'; else if(column1[1] <= column2[0]) res = 'N'; } else if (op_type == 1) { // < if(column1[1] < column2[0]) res = 'A'; else if(column1[0] >= column2[1]) res = 'N'; } else if (op_type == 6) { // >= if(column1[0] >= column2[1]) res = 'A'; else if(column1[1] < column2[0]) res = 'N'; } else if (op_type == 5) { // <= if(column1[1] <= column2[0]) res = 'A'; else if(column1[0] > column2[1]) res = 'N'; } else if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // = res = 'A'; return res; } char host_compare(float_type* column1, float_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(fh_greater(column1[0],column2[1])) res = 'A'; else if(fh_less_equal_to(column1[1],column2[0])) res = 'N'; } else if (op_type == 1) { // < if(fh_less(column1[1],column2[0])) res = 'A'; else if(fh_greater_equal_to(column1[0],column2[1])) res = 'N'; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[1],column2[0])) res = 'A'; else if(fh_less(column1[1],column2[0])) res = 'N'; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],column2[0])) res = 'A'; else if(fh_greater(column1[0],column2[1])) res = 'N'; } else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // = res = 'A'; return res; } char host_compare(float_type* column1, int_type* column2, int_type op_type) { char res = 'R'; if (op_type == 2) { // > if(fh_greater(column1[0],(float_type)column2[1])) res = 'A'; else if(fh_less_equal_to(column1[1],(float_type)column2[0])) res = 'N'; } else if (op_type == 1) { // < if(fh_less(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_greater_equal_to(column1[0],(float_type)column2[1])) res = 'N'; } else if (op_type == 6) { // >= if(fh_greater_equal_to(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_less(column1[1],(float_type)column2[0])) res = 'N'; } else if (op_type == 5) { // <= if(fh_less_equal_to(column1[1],(float_type)column2[0])) res = 'A'; else if(fh_greater(column1[0],(float_type)column2[1])) res = 'N'; } else if (op_type == 4 && fh_equal_to(column1[0],(float_type) column2[1]) && fh_equal_to(column1[1],(float_type)column2[0])) // = res = 'A'; return res; } float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); temp[0] = (float_type)column1[0]; temp[1] = (float_type)column1[1]; if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = temp[0] * column2[0]; temp[1] = temp[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = temp[0] + column2[0]; temp[1] = temp[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - temp[0]; temp[1] = column2[1] - temp[1]; } else { temp[0] = column2[0] / temp[0]; temp[1] = 
column2[1] / temp[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = temp[0] * column2[0]; temp[1] = temp[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = temp[0] + column2[0]; temp[1] = temp[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = temp[0] - column2[0]; temp[1] = temp[1] - column2[1]; } else { temp[0] = temp[0] / column2[0]; temp[1] = temp[1] / column2[1]; } }; return temp; } int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse) { int_type* temp = (int_type*)malloc(2*int_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - column2[0]; temp[1] = column1[1] - column2[1]; } else { temp[0] = column1[0] / column2[0]; temp[1] = column1[1] / column2[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - column1[0]; temp[1] = column2[1] - column1[1]; } else { temp[0] = column2[0] / column1[0]; temp[1] = column2[1] / column1[1]; } } return temp; } float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - column2[0]; temp[1] = column1[1] - column2[1]; } else { temp[0] = column1[0] / column2[0]; temp[1] = column1[1] / column2[1]; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * column2[0]; temp[1] = column1[1] * column2[1]; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + column2[0]; temp[1] = column1[1] + column2[1]; } else if (op_type.compare("MINUS") == 0) { temp[0] = column2[0] - column1[0]; temp[1] = column2[1] - column1[1]; } else { temp[0] = column2[0] / column1[0]; temp[1] = column2[1] / column1[1]; } } return temp; } int_type* host_op(int_type* column1, int_type d, string op_type, int reverse) { int_type* temp = (int_type*)malloc(2*int_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - d; temp[1] = column1[1] - d; } else { temp[0] = column1[0] / d; temp[1] = column1[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = d - column1[0]; temp[1] = d - column1[1]; } else { temp[0] = d / column1[0]; temp[1] = d / column1[1]; } }; return temp; } float_type* host_op(int_type* column1, float_type d, string op_type, int reverse) { float_type* temp = (float_type*)malloc(2*float_size); temp[0] = (float_type)column1[0]; temp[1] = (float_type)column1[1]; float_type* temp1 = 
(float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp1[0] = temp[0] * d; temp1[1] = temp[1] * d; } else if (op_type.compare("ADD") == 0) { temp1[0] = temp[0] + d; temp1[1] = temp[1] + d; } else if (op_type.compare("MINUS") == 0) { temp1[0] = temp[0] - d; temp1[1] = temp[1] - d; } else { temp1[0] = temp[0] / d; temp1[1] = temp[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp1[0] = temp[0] * d; temp1[1] = temp[1] * d; } else if (op_type.compare("ADD") == 0) { temp1[0] = temp[0] + d; temp1[1] = temp[1] + d; } else if (op_type.compare("MINUS") == 0) { temp1[0] = d - temp[0]; temp1[1] = d - temp[1]; } else { temp1[0] = d / temp[0]; temp1[1] = d / temp[1]; } }; free(temp); return temp1; } float_type* host_op(float_type* column1, float_type d, string op_type,int reverse) { float_type* temp = (float_type*)malloc(2*float_size); if(reverse == 0) { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = column1[0] - d; temp[1] = column1[1] - d; } else { temp[0] = column1[0] / d; temp[1] = column1[1] / d; } } else { if (op_type.compare("MUL") == 0) { temp[0] = column1[0] * d; temp[1] = column1[1] * d; } else if (op_type.compare("ADD") == 0) { temp[0] = column1[0] + d; temp[1] = column1[1] + d; } else if (op_type.compare("MINUS") == 0) { temp[0] = d - column1[0]; temp[1] = d - column1[1]; } else { temp[0] = d / column1[0]; temp[1] = d / column1[1]; } }; return temp; } unsigned int precision_func(unsigned int& p1, unsigned int& p2, string op) { if (op.compare("DIV") != 0 ) { unsigned int res; if (op.compare("MUL") != 0 ) { if(p1 > p2) { res = p1; p2 = p1-p2; p1 = 0; } else { res = p1; p1 = p2-p1; p2 = 0; }; return res; } else { //std::swap(p1,p2); res = p1+p2; p1 = 0; p2 = 0; return res; }; } else { if(p1 == p2) { p1 = p1+4; p2 = 0; return p1; } else { if(p1 > p2) { p1 = p1 + (p1-p2) + 4; p2 = 0; return p1; } else { p2 = p2 + (p2-p1) + 4; p1 = 0; return p2; } } }; } //CudaSet a contains two records - with all minimum and maximum values of the segment //We need to determine if this segment needs to be processed //The check takes place in host's memory char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, unsigned int segment) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<char> bool_vectors; stack<unsigned int> exe_precision; string s1, s2, s1_val, s2_val; int_type n1, n2, res; if(a->not_compressed) return 'R'; //first we need to set all host arrays [0] and [1] of t to min and max values of appropriate files set<string> uniques; queue<string> fields(op_value); CudaSet *t; FILE* f; unsigned int cnt; string f1; while(!fields.empty()) { if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) { if(a->filtered) t = varNames[a->source_name]; else t = a; // copy t min and max values to a only if int, decimal or float if(t->type[fields.front()] <= 1) { f1 = t->load_file_name + "." + fields.front() + "." 
+ to_string(segment); f = fopen (f1.c_str() , "rb" ); if(!f) { cout << "Error opening " << f1 << " file " << endl; exit(0); }; fread((char *)&cnt, 4, 1, f); if (t->type[fields.front()] == 0) { a->h_columns_int[fields.front()].resize(2); fread((char *)&a->h_columns_int[fields.front()][0], 8, 1, f); fread((char *)&a->h_columns_int[fields.front()][1], 8, 1, f); fseek(f, 8+cnt, SEEK_CUR); fread((char *)&a->mRecCount, 4, 1, f); //cout << endl << "ZONE " << a->mRecCount << endl; fread((char *)&cnt, 4, 1, f); //cout << "file " << f1 << " " << segment << " " << a->h_columns_int[fields.front()][0] << ":" << a->h_columns_int[fields.front()][1] << endl; } else { long long int t; a->h_columns_float[fields.front()].resize(2); fread((char *)&t, 8, 1, f); a->h_columns_float[fields.front()][0] = (float_type)t/100.0; fread((char *)&t, 8, 1, f); a->h_columns_float[fields.front()][1] = (float_type)t/100.0; //cout << "file " << f1 << " " << segment << " " << a->h_columns_float[a->type_index[colIndex]][0] << ":" << a->h_columns_float[a->type_index[colIndex]][1] << endl; }; fclose(f); }; }; uniques.insert(fields.front()); fields.pop(); }; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); //cout << ss << endl; if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); exe_precision.push(op_nums_precision.front()); op_nums_precision.pop(); } else if (ss.compare("NAME") == 0) { if(var_exists(a, op_value.front())) { exe_value.push(op_value.front()); op_value.pop(); } else { process_error(1, "Couldn't find column " + op_value.front()); //cout << "Couldn't find column " << op_value.front() << endl; //exit(0); }; } else if (ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) n1 = n1*(unsigned int)pow(10,p1); if(p2) n2 = n2*(unsigned int)pow(10,p2); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; exe_type.push("NUMBER"); exe_nums.push(res); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s1_val, exe_vectors); exe_type.push("NAME"); exe_value.push(""); exe_precision.push(0); } else if (s2.compare("NAME") == 0 && s1.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s2_val, exe_vectors); //cout << "name " << s2_val << endl; exe_type.push("NAME"); exe_value.push(""); exe_precision.push(0); } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s1_val, 
exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,p2); }; exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(host_op(t,n1,ss,1)); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,p2); }; exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(host_op(t,n1,ss,0)); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p1 = get_decimals(a, s1_val, exe_precision); auto p2 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); int_type* s3 = get_host_vec(a, s2_val, exe_vectors);; exe_type.push("NAME"); exe_value.push(""); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,p1); t[1] = t[1]*(unsigned int)pow(10,p1); }; if(p2) { s3[0] = s3[0]*(unsigned int)pow(10,p2); s3[1] = s3[1]*(unsigned int)pow(10,p2); }; exe_vectors.push(host_op(t,s3,ss,1)); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) == a->columnNames.end()) delete [] t; if(std::find(a->columnNames.begin(), a->columnNames.end(), s2_val) == a->columnNames.end()) delete [] s3; } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto pres = std::max(p1, p2); exe_precision.push(pres); exe_type.push("NAME"); exe_value.push(""); if(p1) n1 = n1*(unsigned int)pow(10,pres-p1); if(p2) n2 = n2*(unsigned int)pow(10,pres-p2); bool_vectors.push(host_compare(n1,n2,cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { time_t tt; s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type val; int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pos = s1_val.find("date()"); bool_vectors.push('R'); exe_type.push("NAME"); exe_value.push(""); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s2_val = exe_value.top(); exe_value.pop(); s1_val = exe_value.top(); exe_value.pop(); int_type val; time_t tt; int_type* t = get_host_vec(a, s1_val, exe_vectors); bool_vectors.push('R'); exe_type.push("NAME"); exe_value.push(""); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s1_val, exe_precision); int_type* t = get_host_vec(a, s1_val, exe_vectors); auto pres = std::max(p1, p2); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,pres-p1); t[1] = t[1]*(unsigned int)pow(10,pres-p1); }; 
if(p2) { n1 = n1*(unsigned int)pow(10,pres-p2); }; exe_type.push("NAME"); exe_value.push(""); bool_vectors.push(host_compare(t,n1,cmp_type)); } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto p1 = get_decimals(a, s2_val, exe_precision); int_type* t = get_host_vec(a, s2_val, exe_vectors); auto pres = std::max(p1, p2); exe_precision.push(pres); if(p1) { t[0] = t[0]*(unsigned int)pow(10,pres-p1); t[1] = t[1]*(unsigned int)pow(10,pres-p1); }; if(p2) { n1 = n1*(unsigned int)pow(10,pres-p2); }; exe_type.push("NAME"); exe_value.push(""); bool_vectors.push(host_compare(t,n1,cmp_type)); } else { return 'R'; } } else if (ss.compare("AND") == 0) { char s3 = bool_vectors.top(); bool_vectors.pop(); char s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("NAME"); bool_vectors.push(host_logical_and(s2,s3)); } else if (ss.compare("OR") == 0) { char s3 = bool_vectors.top(); bool_vectors.pop(); char s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("NAME"); bool_vectors.push(host_logical_or(s2,s3)); } else { if(ss.compare("JOIN") == 0) process_error(2, "operation = is not valid"); //cout << "operation = is not valid" << endl; else process_error(2, "operation " + string(ss)+ " is not valid"); //cout << "operation " << ss << " is not valid" << endl; exit(0); // never gets here } }; }; return bool_vectors.top(); }
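precision_func in both copies of this file rescales fixed-point decimals before they are combined: a value with precision p is stored as value*10^p, so for ADD/MINUS the operand with the coarser scale is multiplied up by a power of ten, for MUL the scales add, and DIV reserves four extra digits for the quotient. The sketch below restates only the alignment step for addition; it does not reproduce precision_func's exact in/out parameter convention, and the function and variable names are chosen for the example.

#include <cstdio>
#include <cmath>

// Align two fixed-point decimals (stored as value * 10^p) and add them.
long long add_decimals(long long v1, unsigned p1, long long v2, unsigned p2,
                       unsigned* p_out) {
    unsigned target = (p1 > p2) ? p1 : p2;        // keep the finer (larger) scale
    v1 *= (long long)pow(10.0, target - p1);      // scale up the coarser operand
    v2 *= (long long)pow(10.0, target - p2);
    *p_out = target;
    return v1 + v2;
}

int main() {
    unsigned p;
    // 12.3 (123, p=1) + 4.56 (456, p=2) = 16.86 (1686, p=2)
    long long r = add_decimals(123, 1, 456, 2, &p);
    printf("%lld at precision %u\n", r, p);
    return 0;
}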
1bfc9d26f2eb68c208832cdcffdaca2b7860804c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor<scalar_t, 5> idata, PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][t1][h1][w1]; odata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = t0lambda * (h0lambda * (w0lambda * idata[n][c][t1][h1][w1] + w1lambda * idata[n][c][t1][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1][h1 + h1p][w1] + w1lambda * idata[n][c][t1][h1 + h1p][w1 + w1p])) + t1lambda * (h0lambda * (w0lambda * idata[n][c][t1 + t1p][h1][w1] + w1lambda * idata[n][c][t1 + t1p][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1 + t1p][h1 + h1p][w1] + w1lambda * idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p])); odata[n][c][t2][h2][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_backward_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor<scalar_t, 5> idata, const PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][t1][h1][w1]; idata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][t2][h2][w2]; atomicAdd( &idata[n][c][t1][h1][w1], static_cast<scalar_t>(t0lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1][w1 + w1p], static_cast<scalar_t>(t0lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1], static_cast<scalar_t>(t0lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t0lambda * h1lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1], static_cast<scalar_t>(t1lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1 + w1p], static_cast<scalar_t>(t1lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1], static_cast<scalar_t>(t1lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t1lambda * h1lambda * w1lambda * d2val)); } } } } static void upsample_trilinear3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input.size(0); int channels = input.size(1); int input_depth = input.size(2); int input_height = input.size(3); int input_width = input.size(4); upsample_3d_shape_check( input, Tensor(), nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); output.resize_({input.size(0), input.size(1), output_depth, output_height, output_width}); output.zero_(); AT_ASSERT( input_depth > 0 && input_height > 0 && input_width > 0 && output_depth > 0 && output_height > 0 && output_width > 0); const int num_kernels = output_depth * output_height * output_width; const int num_threads = ::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_trilinear3d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 5>(); auto odata = output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); hipLaunchKernelGGL(( upsample_trilinear3d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void upsample_trilinear3d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; 
checkAllSameGPU( "upsample_trilinear3d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); AT_CHECK( input_size.size() == 5, "It is expected input_size equals to 5, but got size ", input_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_size[0]; int channels = input_size[1]; int input_depth = input_size[2]; int input_height = input_size[3]; int input_width = input_size[4]; upsample_3d_shape_check( Tensor(), grad_output_, nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_( {nbatch, channels, input_depth, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_depth * output_height * output_width; const int num_threads = ::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_trilinear3d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 5>(); auto odata = grad_output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); hipLaunchKernelGGL(( upsample_trilinear3d_backward_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_trilinear3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor upsample_trilinear3d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners) { Tensor output = at::empty_like(input); upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor& upsample_trilinear3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } Tensor upsample_trilinear3d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { Tensor grad_input = at::empty_like(grad_output); upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } } // namespace native } // namespace at
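For every output voxel the kernels above derive, per axis, a real-valued source coordinate, its integer floor, a 0/1 step to the next sample (suppressed at the border) and a complementary pair of weights, then blend the eight neighbouring input voxels. The 1-D version of that index/weight computation is sketched below. It assumes area_pixel_compute_source_index behaves as src = scale*dst with align_corners and src = scale*(dst+0.5)-0.5 clamped at zero otherwise; that formula is an assumption stated here, not something visible in this file.

#include <cstdio>

struct LinCoord { int i0; int ip; float w0; float w1; };

// One axis of the trilinear sampling: mirrors t1/t1p/t0lambda/t1lambda above.
LinCoord source_coord(float scale, int dst, bool align_corners, int src_size) {
    float src = align_corners ? scale * dst : scale * (dst + 0.5f) - 0.5f;
    if (src < 0.f) src = 0.f;                  // clamp as in the non-cubic path
    LinCoord c;
    c.i0 = (int)src;                           // floor index
    c.ip = (c.i0 < src_size - 1) ? 1 : 0;      // step to the neighbour, 0 at the border
    c.w1 = src - c.i0;                         // weight toward the upper neighbour
    c.w0 = 1.f - c.w1;                         // weight of the lower neighbour
    return c;
}

int main() {
    LinCoord c = source_coord(0.5f, 7, /*align_corners=*/false, 16);
    printf("i0=%d ip=%d w0=%f w1=%f\n", c.i0, c.ip, c.w0, c.w1);
    return 0;
}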
1bfc9d26f2eb68c208832cdcffdaca2b7860804c.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor<scalar_t, 5> idata, PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][t1][h1][w1]; odata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = t0lambda * (h0lambda * (w0lambda * idata[n][c][t1][h1][w1] + w1lambda * idata[n][c][t1][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1][h1 + h1p][w1] + w1lambda * idata[n][c][t1][h1 + h1p][w1 + w1p])) + t1lambda * (h0lambda * (w0lambda * idata[n][c][t1 + t1p][h1][w1] + w1lambda * idata[n][c][t1 + t1p][h1][w1 + w1p]) + h1lambda * (w0lambda * idata[n][c][t1 + t1p][h1 + h1p][w1] + w1lambda * idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p])); odata[n][c][t2][h2][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_trilinear3d_backward_out_frame( const int n, const accscalar_t rdepth, const accscalar_t rheight, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor<scalar_t, 5> idata, const PackedTensorAccessor<scalar_t, 5> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int depth1 = idata.size(2); const int height1 = idata.size(3); const int width1 = idata.size(4); const int depth2 = odata.size(2); const int height2 = odata.size(3); const int width2 = odata.size(4); if (index < n) { const int w2 = (index % (height2 * width2)) % width2; // 0:width2-1 const int h2 = (index % (height2 * width2)) / width2; // 0:height2-1 const int t2 = index / (height2 * width2); // 0:depth2-1 // special case: just copy if (depth1 == depth2 && height1 == height2 && width1 == width2) { const int t1 = t2; const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][t1][h1][w1]; idata[n][c][t2][h2][w2] = val; } } return; } // const accscalar_t t1r = area_pixel_compute_source_index<accscalar_t>( rdepth, t2, align_corners, /*cubic=*/false); const int t1 = t1r; const int t1p = (t1 < depth1 - 1) ? 1 : 0; const accscalar_t t1lambda = t1r - t1; const accscalar_t t0lambda = static_cast<accscalar_t>(1) - t1lambda; // const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>( rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const accscalar_t h1lambda = h1r - h1; const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda; // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][t2][h2][w2]; atomicAdd( &idata[n][c][t1][h1][w1], static_cast<scalar_t>(t0lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1][w1 + w1p], static_cast<scalar_t>(t0lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1], static_cast<scalar_t>(t0lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t0lambda * h1lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1], static_cast<scalar_t>(t1lambda * h0lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1][w1 + w1p], static_cast<scalar_t>(t1lambda * h0lambda * w1lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1], static_cast<scalar_t>(t1lambda * h1lambda * w0lambda * d2val)); atomicAdd( &idata[n][c][t1 + t1p][h1 + h1p][w1 + w1p], static_cast<scalar_t>(t1lambda * h1lambda * w1lambda * d2val)); } } } } static void upsample_trilinear3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input.size(0); int channels = input.size(1); int input_depth = input.size(2); int input_height = input.size(3); int input_width = input.size(4); upsample_3d_shape_check( input, Tensor(), nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); output.resize_({input.size(0), input.size(1), output_depth, output_height, output_width}); output.zero_(); AT_ASSERT( input_depth > 0 && input_height > 0 && input_width > 0 && output_depth > 0 && output_height > 0 && output_width > 0); const int num_kernels = output_depth * output_height * output_width; const int num_threads = std::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_trilinear3d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 5>(); auto odata = output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); upsample_trilinear3d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_trilinear3d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( 
"upsample_trilinear3d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", output_size.size()); AT_CHECK( input_size.size() == 5, "It is expected input_size equals to 5, but got size ", input_size.size()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_size[0]; int channels = input_size[1]; int input_depth = input_size[2]; int input_height = input_size[3]; int input_width = input_size[4]; upsample_3d_shape_check( Tensor(), grad_output_, nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_( {nbatch, channels, input_depth, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_depth * output_height * output_width; const int num_threads = std::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_trilinear3d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 5>(); auto odata = grad_output.packed_accessor<scalar_t, 5>(); const accscalar_t rdepth = area_pixel_compute_scale<accscalar_t>( input_depth, output_depth, align_corners); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners); upsample_trilinear3d_backward_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_trilinear3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners) { upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor upsample_trilinear3d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners) { Tensor output = at::empty_like(input); upsample_trilinear3d_out_cuda_template( output, input, output_size, align_corners); return output; } Tensor& upsample_trilinear3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } Tensor upsample_trilinear3d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { Tensor grad_input = at::empty_like(grad_output); upsample_trilinear3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners); return grad_input; } } // namespace native } // namespace at
dc47238b855d8afbca7e5927a293ddde79b0e8c9.hip
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include<iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

//Defining number of elements in Array
#define N 5

//Kernel function for squaring number
__global__ void gpuSquare(float *d_in, float *d_out)
{
    //Getting thread index for current kernel
    int tid = threadIdx.x;
    // handle the data at this index
    float temp = d_in[tid];
    d_out[tid] = temp*temp;
}

int main(void)
{
    //Defining Arrays for host
    float h_in[N], h_out[N];
    //Defining Pointers for device
    float *d_in, *d_out;

    // allocate the memory on the GPU (device)
    hipMalloc((void**)&d_in, N * sizeof(float));
    hipMalloc((void**)&d_out, N * sizeof(float));

    //Initializing Array
    for (int i = 0; i < N; i++)
    {
        h_in[i] = i;
    }

    //Copy Array from host to device
    hipMemcpy(d_in, h_in, N * sizeof(float), hipMemcpyHostToDevice);

    //Calling square kernel with one block and N threads per block
    hipLaunchKernelGGL(( gpuSquare), dim3(1), dim3(N), 0, 0, d_in, d_out);

    //Copying result back to host from device memory
    hipMemcpy(h_out, d_out, N * sizeof(float), hipMemcpyDeviceToHost);

    //Printing result on console
    printf("Square of Number on GPU \n");
    for (int i = 0; i < N; i++)
    {
        printf("The square of %f is %f\n", h_in[i], h_out[i]);
    }

    //Free up memory
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
dc47238b855d8afbca7e5927a293ddde79b0e8c9.cu
#include "stdio.h" #include<iostream> #include <cuda.h> #include <cuda_runtime.h> //Defining number of elements in Array #define N 5 //Kernel function for squaring number __global__ void gpuSquare(float *d_in, float *d_out) { //Getting thread index for current kernel int tid = threadIdx.x; // handle the data at this index float temp = d_in[tid]; d_out[tid] = temp*temp; } int main(void) { //Defining Arrays for host float h_in[N], h_out[N]; //Defining Pointers for device float *d_in, *d_out; // allocate the memory on the cpu cudaMalloc((void**)&d_in, N * sizeof(float)); cudaMalloc((void**)&d_out, N * sizeof(float)); //Initializing Array for (int i = 0; i < N; i++) { h_in[i] = i; } //Copy Array from host to device cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice); //Calling square kernel with one block and N threads per block gpuSquare<<<1, N>>>(d_in, d_out); //Coping result back to host from device memory cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost); //Printing result on console printf("Square of Number on GPU \n"); for (int i = 0; i < N; i++) { printf("The square of %f is %f\n", h_in[i], h_out[i]); } //Free up memory cudaFree(d_in); cudaFree(d_out); return 0; }
799fd9ebabf32fd1d6573abf3b60702e94507d06.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
using namespace std;

__global__ void mini1(int *a,int *b,int n)
{
    int block=256*blockIdx.x;
    int mini=7888888;
    for(int i=block;i<min(256+block,n);i++)
    {
        if(mini>a[i])
        {
            mini=a[i];
        }
    }
    b[blockIdx.x]=mini;
}

int main()
{
    cout<<"Enter the size of array"<<endl;
    int n;
    cin>>n;
    int a[n];
    hipEvent_t start,end;
    hipEventCreate(&start);
    hipEventCreate(&end);
    for(int i=0;i<n;i++)
    {
        cout<<"Enter elements: ";
        cin>>a[i];
    }
    int *ad,*bd;
    int size=n*sizeof(int);
    hipMalloc(&ad,size);
    hipMemcpy(ad,a,size,hipMemcpyHostToDevice);
    int grids=ceil(n*1.0f/256.0f);
    hipMalloc(&bd,grids*sizeof(int));
    dim3 grid(grids,1);
    dim3 block(1,1);
    hipEventRecord(start);
    while(n>1)
    {
        hipLaunchKernelGGL(( mini1), dim3(grids),dim3(block), 0, 0, ad,bd,n);
        n=ceil(n*1.0f/256.0f);
        hipMemcpy(ad,bd,n*sizeof(int),hipMemcpyDeviceToDevice);
    }
    hipEventRecord(end);
    hipEventSynchronize(end);
    float time=0;
    hipEventElapsedTime(&time,start,end);
    int ans[2];
    hipMemcpy(ans,ad,4,hipMemcpyDeviceToHost);
    cout<<"The minimum element is "<<ans[0]<<endl;
    cout<<"The time required for it is ";
    cout<<time<<endl;
}
799fd9ebabf32fd1d6573abf3b60702e94507d06.cu
#include<iostream>
#include<cstdio>
using namespace std;

__global__ void mini1(int *a,int *b,int n)
{
    int block=256*blockIdx.x;
    int mini=7888888;
    for(int i=block;i<min(256+block,n);i++)
    {
        if(mini>a[i])
        {
            mini=a[i];
        }
    }
    b[blockIdx.x]=mini;
}

int main()
{
    cout<<"Enter the size of array"<<endl;
    int n;
    cin>>n;
    int a[n];
    cudaEvent_t start,end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    for(int i=0;i<n;i++)
    {
        cout<<"Enter elements: ";
        cin>>a[i];
    }
    int *ad,*bd;
    int size=n*sizeof(int);
    cudaMalloc(&ad,size);
    cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
    int grids=ceil(n*1.0f/256.0f);
    cudaMalloc(&bd,grids*sizeof(int));
    dim3 grid(grids,1);
    dim3 block(1,1);
    cudaEventRecord(start);
    while(n>1)
    {
        mini1<<<grids,block>>>(ad,bd,n);
        n=ceil(n*1.0f/256.0f);
        cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice);
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float time=0;
    cudaEventElapsedTime(&time,start,end);
    int ans[2];
    cudaMemcpy(ans,ad,4,cudaMemcpyDeviceToHost);
    cout<<"The minimum element is "<<ans[0]<<endl;
    cout<<"The time required for it is ";
    cout<<time<<endl;
}
bd0591de49140318f56d6566039a07ebf106792b.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/HTConfigWorkGPU.cuh" #include "../../../DataStructures/TrigMuonDataStructs/TrigMuonDataStructs/TrigMuonAccelerationEDM.h" #include "tbb/tick_count.h" #include <cstring> #include <cmath> #include <iostream> #include <unistd.h> #include "APE/BufferAccessor.hpp" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> // helper functions for CUDA timing and initialization #include <helper_functions.h> // helper functions for timing, string parsing APE::HTConfigWorkGPU::HTConfigWorkGPU(const HoughTransformContextGPU& ctx, std::shared_ptr<APE::BufferContainer> data) : m_context(0), m_input(data) { m_context = new HoughTransformContextGPU(ctx); m_buffer = std::make_shared<APE::BufferContainer>(sizeof(SIMPLE_OUTPUT_DATA));//output data m_buffer->setAlgorithm(m_input->getAlgorithm()); m_buffer->setModule(m_input->getModule()); APE::BufferAccessor::setToken(*m_buffer,m_input->getToken()); tbb::tick_count tstart=tbb::tick_count::now(); //std::cout<<"In work: Received Algorithm="<<m_input->getAlgorithm() // <<" token="<<m_input->getToken() // <<" module="<<m_input->getModule() // <<" payloadSize="<<m_input->getPayloadSize() // <<" TransferSize="<<m_input->getTransferSize() // <<" userBuffer="<<m_input->getBuffer() // <<std::endl; m_stats.reserve(10); m_stats.push_back((tbb::tick_count::now()-tstart).seconds()*1000.); } APE::HTConfigWorkGPU::~HTConfigWorkGPU(){ if(m_context) delete m_context; } std::shared_ptr<APE::BufferContainer> APE::HTConfigWorkGPU::getOutput(){ return m_buffer; } void APE::HTConfigWorkGPU::run(){ tbb::tick_count tstart=tbb::tick_count::now(); std::cout<<"running HTConfigWork ..." << std::endl; const APE::HoughTransformDeviceContext& devC = m_context->m_devC; int id = m_context->m_devC.m_deviceId; std::cout << "Device Id: " << id << std::endl; checkCudaErrors(hipSetDevice(id)); memcpy(devC.h_HTConfig, &m_context->m_HTConfig, sizeof(HT_ALGO_CONFIGURATION)); std::cout<<"copying configuration to GPU ..."<<std::endl; checkCudaErrors(hipMemcpyAsync(devC.d_HTConfig, devC.h_HTConfig, sizeof(HT_ALGO_CONFIGURATION), hipMemcpyHostToDevice, 0)); checkCudaErrors( hipDeviceSynchronize() ); tbb::tick_count::interval_t duration=tbb::tick_count::now()-tstart; m_stats.push_back(duration.seconds()*1000.0); tstart=tbb::tick_count::now(); SIMPLE_OUTPUT_DATA *pOutput = static_cast<SIMPLE_OUTPUT_DATA*>(m_buffer->getBuffer()); pOutput->m_nDataWords = 777; std::cout << "Check code: " << pOutput->m_nDataWords <<std::endl; duration=tbb::tick_count::now()-tstart; m_stats.push_back(duration.seconds()*1000.0); } const std::vector<double>& APE::HTConfigWorkGPU::getStats(){return m_stats;} /* void APE::HTConfigWorkGPU::saveConfiguration(HT_ALGO_CONFIGURATION* config){ std::cout << "Storing algorithm configurations..." << std::endl; } */
bd0591de49140318f56d6566039a07ebf106792b.cu
#include "../include/HTConfigWorkGPU.cuh" #include "../../../DataStructures/TrigMuonDataStructs/TrigMuonDataStructs/TrigMuonAccelerationEDM.h" #include "tbb/tick_count.h" #include <cstring> #include <cmath> #include <iostream> #include <unistd.h> #include "APE/BufferAccessor.hpp" #include <cuda.h> #include <cuda_runtime.h> #include <helper_cuda.h> // helper functions for CUDA timing and initialization #include <helper_functions.h> // helper functions for timing, string parsing APE::HTConfigWorkGPU::HTConfigWorkGPU(const HoughTransformContextGPU& ctx, std::shared_ptr<APE::BufferContainer> data) : m_context(0), m_input(data) { m_context = new HoughTransformContextGPU(ctx); m_buffer = std::make_shared<APE::BufferContainer>(sizeof(SIMPLE_OUTPUT_DATA));//output data m_buffer->setAlgorithm(m_input->getAlgorithm()); m_buffer->setModule(m_input->getModule()); APE::BufferAccessor::setToken(*m_buffer,m_input->getToken()); tbb::tick_count tstart=tbb::tick_count::now(); //std::cout<<"In work: Received Algorithm="<<m_input->getAlgorithm() // <<" token="<<m_input->getToken() // <<" module="<<m_input->getModule() // <<" payloadSize="<<m_input->getPayloadSize() // <<" TransferSize="<<m_input->getTransferSize() // <<" userBuffer="<<m_input->getBuffer() // <<std::endl; m_stats.reserve(10); m_stats.push_back((tbb::tick_count::now()-tstart).seconds()*1000.); } APE::HTConfigWorkGPU::~HTConfigWorkGPU(){ if(m_context) delete m_context; } std::shared_ptr<APE::BufferContainer> APE::HTConfigWorkGPU::getOutput(){ return m_buffer; } void APE::HTConfigWorkGPU::run(){ tbb::tick_count tstart=tbb::tick_count::now(); std::cout<<"running HTConfigWork ..." << std::endl; const APE::HoughTransformDeviceContext& devC = m_context->m_devC; int id = m_context->m_devC.m_deviceId; std::cout << "Device Id: " << id << std::endl; checkCudaErrors(cudaSetDevice(id)); memcpy(devC.h_HTConfig, &m_context->m_HTConfig, sizeof(HT_ALGO_CONFIGURATION)); std::cout<<"copying configuration to GPU ..."<<std::endl; checkCudaErrors(cudaMemcpyAsync(devC.d_HTConfig, devC.h_HTConfig, sizeof(HT_ALGO_CONFIGURATION), cudaMemcpyHostToDevice, 0)); checkCudaErrors( cudaDeviceSynchronize() ); tbb::tick_count::interval_t duration=tbb::tick_count::now()-tstart; m_stats.push_back(duration.seconds()*1000.0); tstart=tbb::tick_count::now(); SIMPLE_OUTPUT_DATA *pOutput = static_cast<SIMPLE_OUTPUT_DATA*>(m_buffer->getBuffer()); pOutput->m_nDataWords = 777; std::cout << "Check code: " << pOutput->m_nDataWords <<std::endl; duration=tbb::tick_count::now()-tstart; m_stats.push_back(duration.seconds()*1000.0); } const std::vector<double>& APE::HTConfigWorkGPU::getStats(){return m_stats;} /* void APE::HTConfigWorkGPU::saveConfiguration(HT_ALGO_CONFIGURATION* config){ std::cout << "Storing algorithm configurations..." << std::endl; } */
cf8a9f11d28d6f84d3651da5bc2c6b367afa4ed3.hip
// !!! This is a file automatically generated by hipify!!! // RUN: %run_test hipify "%s" "%t" %cuda_args #include <stdio.h> #include <stdlib.h> #include <assert.h> // CHECK: #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // CHECK: #include "hipsparse.h" #include "hipsparse.h" int main(int argc, char*argv[]) { // CHECK: hipsparseHandle_t handle = NULL; hipsparseHandle_t handle = NULL; // CHECK: hipStream_t stream = NULL; hipStream_t stream = NULL; // CHECK: hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; // CHECK: hipError_t cudaStat1 = hipSuccess; // CHECK: hipError_t cudaStat2 = hipSuccess; // CHECK: hipError_t cudaStat3 = hipSuccess; // CHECK: hipError_t cudaStat4 = hipSuccess; // CHECK: hipError_t cudaStat5 = hipSuccess; // CHECK: hipError_t cudaStat6 = hipSuccess; hipError_t cudaStat1 = hipSuccess; hipError_t cudaStat2 = hipSuccess; hipError_t cudaStat3 = hipSuccess; hipError_t cudaStat4 = hipSuccess; hipError_t cudaStat5 = hipSuccess; hipError_t cudaStat6 = hipSuccess; /* * A is a 3x3 sparse matrix * | 1 2 0 | * A = | 0 5 0 | * | 0 8 0 | */ const int m = 3; const int n = 3; const int nnz = 4; #if 0 /* index starts at 0 */ int h_cooRows[nnz] = { 2, 1, 0, 0 }; int h_cooCols[nnz] = { 1, 1, 0, 1 }; #else /* index starts at -2 */ int h_cooRows[nnz] = { 0, -1, -2, -2 }; int h_cooCols[nnz] = { -1, -1, -2, -1 }; #endif double h_cooVals[nnz] = { 8.0, 5.0, 1.0, 2.0 }; int h_P[nnz]; int *d_cooRows = NULL; int *d_cooCols = NULL; int *d_P = NULL; double *d_cooVals = NULL; double *d_cooVals_sorted = NULL; size_t pBufferSizeInBytes = 0; void *pBuffer = NULL; printf("m = %d, n = %d, nnz=%d \n", m, n, nnz); /* step 1: create cusparse handle, bind a stream */ // CHECK: cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); // CHECK: assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat1); // CHECK: status = hipsparseCreate(&handle); status = hipsparseCreate(&handle); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); // CHECK: status = hipsparseSetStream(handle, stream); status = hipsparseSetStream(handle, stream); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); /* step 2: allocate buffer */ // TODO: status = hipsparseXcoosort_bufferSizeExt( status = hipsparseXcoosort_bufferSizeExt( handle, m, n, nnz, d_cooRows, d_cooCols, &pBufferSizeInBytes ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); printf("pBufferSizeInBytes = %lld bytes \n", (long long)pBufferSizeInBytes); // CHECK: cudaStat1 = hipMalloc(&d_cooRows, sizeof(int)*nnz); cudaStat1 = hipMalloc(&d_cooRows, sizeof(int)*nnz); // CHECK: cudaStat2 = hipMalloc(&d_cooCols, sizeof(int)*nnz); cudaStat2 = hipMalloc(&d_cooCols, sizeof(int)*nnz); // CHECK: cudaStat3 = hipMalloc(&d_P, sizeof(int)*nnz); cudaStat3 = hipMalloc(&d_P, sizeof(int)*nnz); // CHECK: cudaStat4 = hipMalloc(&d_cooVals, sizeof(double)*nnz); cudaStat4 = hipMalloc(&d_cooVals, sizeof(double)*nnz); // CHECK: cudaStat5 = hipMalloc(&d_cooVals_sorted, sizeof(double)*nnz); cudaStat5 = hipMalloc(&d_cooVals_sorted, sizeof(double)*nnz); // CHECK: cudaStat6 = hipMalloc(&pBuffer, sizeof(char)* pBufferSizeInBytes); cudaStat6 = hipMalloc(&pBuffer, sizeof(char)* pBufferSizeInBytes); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == 
cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); // CHECK: assert(hipSuccess == cudaStat5); // CHECK: assert(hipSuccess == cudaStat6); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); assert(hipSuccess == cudaStat4); assert(hipSuccess == cudaStat5); assert(hipSuccess == cudaStat6); // CHECK: cudaStat1 = hipMemcpy(d_cooRows, h_cooRows, sizeof(int)*nnz, hipMemcpyHostToDevice); cudaStat1 = hipMemcpy(d_cooRows, h_cooRows, sizeof(int)*nnz, hipMemcpyHostToDevice); // CHECK: cudaStat2 = hipMemcpy(d_cooCols, h_cooCols, sizeof(int)*nnz, hipMemcpyHostToDevice); cudaStat2 = hipMemcpy(d_cooCols, h_cooCols, sizeof(int)*nnz, hipMemcpyHostToDevice); // CHECK: cudaStat3 = hipMemcpy(d_cooVals, h_cooVals, sizeof(double)*nnz, hipMemcpyHostToDevice); cudaStat3 = hipMemcpy(d_cooVals, h_cooVals, sizeof(double)*nnz, hipMemcpyHostToDevice); // CHECK: cudaStat4 = hipDeviceSynchronize(); cudaStat4 = hipDeviceSynchronize(); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); assert(hipSuccess == cudaStat4); /* step 3: setup permutation vector P to identity */ // TODO: status = hipsparseCreateIdentityPermutation( status = hipsparseCreateIdentityPermutation( handle, nnz, d_P); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); /* step 4: sort COO format by Row */ // TODO: status = hipsparseXcoosortByRow( status = hipsparseXcoosortByRow( handle, m, n, nnz, d_cooRows, d_cooCols, d_P, pBuffer ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); /* step 5: gather sorted cooVals */ // CHECK: status = hipsparseDgthr( // CHECK: HIPSPARSE_INDEX_BASE_ZERO status = hipsparseDgthr( handle, nnz, d_cooVals, d_cooVals_sorted, d_P, HIPSPARSE_INDEX_BASE_ZERO ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(HIPSPARSE_STATUS_SUCCESS == status); /* wait until the computation is done */ // CHECK: cudaStat1 = hipDeviceSynchronize(); cudaStat1 = hipDeviceSynchronize(); // CHECK: cudaStat2 = hipMemcpy(h_cooRows, d_cooRows, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat2 = hipMemcpy(h_cooRows, d_cooRows, sizeof(int)*nnz, hipMemcpyDeviceToHost); // CHECK: cudaStat3 = hipMemcpy(h_cooCols, d_cooCols, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat3 = hipMemcpy(h_cooCols, d_cooCols, sizeof(int)*nnz, hipMemcpyDeviceToHost); // CHECK: cudaStat4 = hipMemcpy(h_P, d_P, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat4 = hipMemcpy(h_P, d_P, sizeof(int)*nnz, hipMemcpyDeviceToHost); // CHECK: cudaStat5 = hipMemcpy(h_cooVals, d_cooVals_sorted, sizeof(double)*nnz, hipMemcpyDeviceToHost); cudaStat5 = hipMemcpy(h_cooVals, d_cooVals_sorted, sizeof(double)*nnz, hipMemcpyDeviceToHost); // CHECK: cudaStat6 = hipDeviceSynchronize(); cudaStat6 = hipDeviceSynchronize(); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); // CHECK: assert(hipSuccess == cudaStat5); // CHECK: assert(hipSuccess == cudaStat6); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); assert(hipSuccess == cudaStat4); assert(hipSuccess == cudaStat5); assert(hipSuccess == cudaStat6); printf("sorted coo: \n"); for (int j = 0; j < nnz; j++) { 
printf("(%d, %d, %f) \n", h_cooRows[j], h_cooCols[j], h_cooVals[j]); } for (int j = 0; j < nnz; j++) { printf("P[%d] = %d \n", j, h_P[j]); } /* free resources */ // CHECK: if (d_cooRows) hipFree(d_cooRows); if (d_cooRows) hipFree(d_cooRows); // CHECK: if (d_cooCols) hipFree(d_cooCols); if (d_cooCols) hipFree(d_cooCols); // CHECK: if (d_P) hipFree(d_P); if (d_P) hipFree(d_P); // CHECK: if (d_cooVals) hipFree(d_cooVals); if (d_cooVals) hipFree(d_cooVals); // CHECK: if (d_cooVals_sorted) hipFree(d_cooVals_sorted); if (d_cooVals_sorted) hipFree(d_cooVals_sorted); // CHECK: if (pBuffer) hipFree(pBuffer); if (pBuffer) hipFree(pBuffer); // if (handle) hipsparseDestroy(handle); if (handle) hipsparseDestroy(handle); // CHECK: if (stream) hipStreamDestroy(stream); if (stream) hipStreamDestroy(stream); // CHECK: hipDeviceReset(); hipDeviceReset(); return 0; }
cf8a9f11d28d6f84d3651da5bc2c6b367afa4ed3.cu
// RUN: %run_test hipify "%s" "%t" %cuda_args #include <stdio.h> #include <stdlib.h> #include <assert.h> // CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> // CHECK: #include "hipsparse.h" #include "cusparse.h" int main(int argc, char*argv[]) { // CHECK: hipsparseHandle_t handle = NULL; cusparseHandle_t handle = NULL; // CHECK: hipStream_t stream = NULL; cudaStream_t stream = NULL; // CHECK: hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; cusparseStatus_t status = CUSPARSE_STATUS_SUCCESS; // CHECK: hipError_t cudaStat1 = hipSuccess; // CHECK: hipError_t cudaStat2 = hipSuccess; // CHECK: hipError_t cudaStat3 = hipSuccess; // CHECK: hipError_t cudaStat4 = hipSuccess; // CHECK: hipError_t cudaStat5 = hipSuccess; // CHECK: hipError_t cudaStat6 = hipSuccess; cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; cudaError_t cudaStat4 = cudaSuccess; cudaError_t cudaStat5 = cudaSuccess; cudaError_t cudaStat6 = cudaSuccess; /* * A is a 3x3 sparse matrix * | 1 2 0 | * A = | 0 5 0 | * | 0 8 0 | */ const int m = 3; const int n = 3; const int nnz = 4; #if 0 /* index starts at 0 */ int h_cooRows[nnz] = { 2, 1, 0, 0 }; int h_cooCols[nnz] = { 1, 1, 0, 1 }; #else /* index starts at -2 */ int h_cooRows[nnz] = { 0, -1, -2, -2 }; int h_cooCols[nnz] = { -1, -1, -2, -1 }; #endif double h_cooVals[nnz] = { 8.0, 5.0, 1.0, 2.0 }; int h_P[nnz]; int *d_cooRows = NULL; int *d_cooCols = NULL; int *d_P = NULL; double *d_cooVals = NULL; double *d_cooVals_sorted = NULL; size_t pBufferSizeInBytes = 0; void *pBuffer = NULL; printf("m = %d, n = %d, nnz=%d \n", m, n, nnz); /* step 1: create cusparse handle, bind a stream */ // CHECK: cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: status = hipsparseCreate(&handle); status = cusparseCreate(&handle); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); // CHECK: status = hipsparseSetStream(handle, stream); status = cusparseSetStream(handle, stream); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* step 2: allocate buffer */ // TODO: status = hipsparseXcoosort_bufferSizeExt( status = cusparseXcoosort_bufferSizeExt( handle, m, n, nnz, d_cooRows, d_cooCols, &pBufferSizeInBytes ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); printf("pBufferSizeInBytes = %lld bytes \n", (long long)pBufferSizeInBytes); // CHECK: cudaStat1 = hipMalloc(&d_cooRows, sizeof(int)*nnz); cudaStat1 = cudaMalloc(&d_cooRows, sizeof(int)*nnz); // CHECK: cudaStat2 = hipMalloc(&d_cooCols, sizeof(int)*nnz); cudaStat2 = cudaMalloc(&d_cooCols, sizeof(int)*nnz); // CHECK: cudaStat3 = hipMalloc(&d_P, sizeof(int)*nnz); cudaStat3 = cudaMalloc(&d_P, sizeof(int)*nnz); // CHECK: cudaStat4 = hipMalloc(&d_cooVals, sizeof(double)*nnz); cudaStat4 = cudaMalloc(&d_cooVals, sizeof(double)*nnz); // CHECK: cudaStat5 = hipMalloc(&d_cooVals_sorted, sizeof(double)*nnz); cudaStat5 = cudaMalloc(&d_cooVals_sorted, sizeof(double)*nnz); // CHECK: cudaStat6 = hipMalloc(&pBuffer, sizeof(char)* pBufferSizeInBytes); cudaStat6 = cudaMalloc(&pBuffer, sizeof(char)* pBufferSizeInBytes); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == 
cudaStat4); // CHECK: assert(hipSuccess == cudaStat5); // CHECK: assert(hipSuccess == cudaStat6); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); assert(cudaSuccess == cudaStat5); assert(cudaSuccess == cudaStat6); // CHECK: cudaStat1 = hipMemcpy(d_cooRows, h_cooRows, sizeof(int)*nnz, hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_cooRows, h_cooRows, sizeof(int)*nnz, cudaMemcpyHostToDevice); // CHECK: cudaStat2 = hipMemcpy(d_cooCols, h_cooCols, sizeof(int)*nnz, hipMemcpyHostToDevice); cudaStat2 = cudaMemcpy(d_cooCols, h_cooCols, sizeof(int)*nnz, cudaMemcpyHostToDevice); // CHECK: cudaStat3 = hipMemcpy(d_cooVals, h_cooVals, sizeof(double)*nnz, hipMemcpyHostToDevice); cudaStat3 = cudaMemcpy(d_cooVals, h_cooVals, sizeof(double)*nnz, cudaMemcpyHostToDevice); // CHECK: cudaStat4 = hipDeviceSynchronize(); cudaStat4 = cudaDeviceSynchronize(); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); /* step 3: setup permutation vector P to identity */ // TODO: status = hipsparseCreateIdentityPermutation( status = cusparseCreateIdentityPermutation( handle, nnz, d_P); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* step 4: sort COO format by Row */ // TODO: status = hipsparseXcoosortByRow( status = cusparseXcoosortByRow( handle, m, n, nnz, d_cooRows, d_cooCols, d_P, pBuffer ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* step 5: gather sorted cooVals */ // CHECK: status = hipsparseDgthr( // CHECK: HIPSPARSE_INDEX_BASE_ZERO status = cusparseDgthr( handle, nnz, d_cooVals, d_cooVals_sorted, d_P, CUSPARSE_INDEX_BASE_ZERO ); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* wait until the computation is done */ // CHECK: cudaStat1 = hipDeviceSynchronize(); cudaStat1 = cudaDeviceSynchronize(); // CHECK: cudaStat2 = hipMemcpy(h_cooRows, d_cooRows, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat2 = cudaMemcpy(h_cooRows, d_cooRows, sizeof(int)*nnz, cudaMemcpyDeviceToHost); // CHECK: cudaStat3 = hipMemcpy(h_cooCols, d_cooCols, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat3 = cudaMemcpy(h_cooCols, d_cooCols, sizeof(int)*nnz, cudaMemcpyDeviceToHost); // CHECK: cudaStat4 = hipMemcpy(h_P, d_P, sizeof(int)*nnz, hipMemcpyDeviceToHost); cudaStat4 = cudaMemcpy(h_P, d_P, sizeof(int)*nnz, cudaMemcpyDeviceToHost); // CHECK: cudaStat5 = hipMemcpy(h_cooVals, d_cooVals_sorted, sizeof(double)*nnz, hipMemcpyDeviceToHost); cudaStat5 = cudaMemcpy(h_cooVals, d_cooVals_sorted, sizeof(double)*nnz, cudaMemcpyDeviceToHost); // CHECK: cudaStat6 = hipDeviceSynchronize(); cudaStat6 = cudaDeviceSynchronize(); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); // CHECK: assert(hipSuccess == cudaStat5); // CHECK: assert(hipSuccess == cudaStat6); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); assert(cudaSuccess == cudaStat5); assert(cudaSuccess == cudaStat6); printf("sorted coo: \n"); for (int j = 0; j < nnz; j++) { printf("(%d, %d, %f) 
\n", h_cooRows[j], h_cooCols[j], h_cooVals[j]); } for (int j = 0; j < nnz; j++) { printf("P[%d] = %d \n", j, h_P[j]); } /* free resources */ // CHECK: if (d_cooRows) hipFree(d_cooRows); if (d_cooRows) cudaFree(d_cooRows); // CHECK: if (d_cooCols) hipFree(d_cooCols); if (d_cooCols) cudaFree(d_cooCols); // CHECK: if (d_P) hipFree(d_P); if (d_P) cudaFree(d_P); // CHECK: if (d_cooVals) hipFree(d_cooVals); if (d_cooVals) cudaFree(d_cooVals); // CHECK: if (d_cooVals_sorted) hipFree(d_cooVals_sorted); if (d_cooVals_sorted) cudaFree(d_cooVals_sorted); // CHECK: if (pBuffer) hipFree(pBuffer); if (pBuffer) cudaFree(pBuffer); // if (handle) hipsparseDestroy(handle); if (handle) cusparseDestroy(handle); // CHECK: if (stream) hipStreamDestroy(stream); if (stream) cudaStreamDestroy(stream); // CHECK: hipDeviceReset(); cudaDeviceReset(); return 0; }
28daa91b8cf8bdb77b7993f9ea63e66b58df6846.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include"pathalg.h" static const int WORK_SIZE =258; void Bellmanor::copydata(int s,vector<edge>&edges,int nodenum){ }; void Bellmanor::dellocate(){ }; void Bellmanor::allocate(int maxn,int maxedge){ } void Bellmanor::topsort() { cout<<" in top sort "<<endl; queue<int>zero; vector<int>order(nodenum*LY,-1); for(int i=0;i<nodenum*LY;i++) zero.push(i); int biao=0; while(!zero.empty()) { int node=zero.front(); zero.pop(); order[node]=biao++; for(int i=0;i<neibn[node].size();i++) { if((--ancestor[neibn[node][i]])==0) zero.push(neibn[node][i]); } } vector<pair<int,int>>tmp; for(int i=0;i<order.size();i++) tmp.push_back(make_pair(i,order[i])); for(int i=0;i<order.size();i++) ordernode.push_back(tmp[i].first); }; void Bellmanor::updatE(vector<int>esigns) { } void Bellmanor::updatS(vector<vector<pair<int,int>>>&stpair) { cout<<"inasd asd"<<endl; L[0]=LY1; L[1]=LY2; S[0]=stpair[0].size(); S[1]=stpair[1].size(); int count=0; ncount=L[0]*S[0]+L[1]*S[1]; for(int i=0;i<nodenum*ncount;i++) d[i]=INT_MAX/2,p[i]=-1; int woffid=0; for(int h=0;h<stpair.size();h++) { for(int k=0;k<L[h];k++) { for(int j=0;j<stpair[h].size();j++) { d[count*nodenum+stpair[h][j].first]=0; count++; } } } /*for(int j=0;j<4;j++) { for(int i=0;i<nodenum;i++) cout<<d[i+j*nodenum]<<" "; cout<<endl; }*/ cout<<"here it is "<<endl; for(int i=1;i<NF.size();i++) NF[i]=L[i-1]*S[i-1]; nodeoff[0]=0; nodeoff[1]=S[0]*L[0]*nodenum; leveloff[0]=0; leveloff[1]=L[0]*edges.size(); size[0]=edges.size()*L[0]*S[0]; size[1]=edges.size()*L[1]*S[1]; cout<<"asd"<<endl; cout<<"ncount is "<<count <<endl; hipMemcpy(dev_d,d,ncount*nodenum*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_p,p,ncount*nodenum*sizeof(int),hipMemcpyHostToDevice); cout<<"out!!!!"<<endl; } void Bellmanor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,vector<vector<int>>&relate,ginfo ginf) { cout<<"init bellmanor"<<endl; nodenum=ginf.pnodesize; edges=ext.first; vector<vector<int>>esigns; esigns=ext.second; stp=stpair; W=WD+1; st=new int[edges.size()*LY]; te=new int[edges.size()*LY]; d=new int[nodenum*LY*YE]; has=new int[nodenum*LY*YE]; p=new int[nodenum*LY*YE]; w=new int[edges.size()*LY]; m1=new int; m2=new int; *m1=0,*m2=0; esignes=new int[edges.size()*LY]; vector<vector<int>>nein(nodenum*LY,vector<int>()); neibn=nein; vector<vector<int>>neie(nodenum,vector<int>()); for(int i=0;i<edges.size();i++) { int s=edges[i].s; int t=edges[i].t; neibn[s].push_back(t); neie[s].push_back(i); } int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<neibn[i].size();j++) { st[count]=i; if(esigns[k][neie[i][j]]<0) te[count]=i; else te[count]=neibn[i][j]; count++; } int cc=0; for(int k=0;k<LY;k++) for(int i=0;i<edges.size();i++) w[cc++]=esigns[k][i]; hipMalloc((void**)&dev_st,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_te,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int)); hipMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int)); hipMalloc((void**)&dev_has,YE*LY*nodenum*sizeof(int)); hipMalloc((void**)&dev_w,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_m1,sizeof(int)); hipMalloc((void**)&dev_m2,sizeof(int)); if(dev_d==NULL) { printf("couldn't allocate %d int's.\n"); } hipMemcpy(dev_te,te,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_st,st,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_w,w,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); 
hipMemcpy(dev_has,has,YE*LY*nodenum*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_m1,m1,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_m2,m2,sizeof(int),hipMemcpyHostToDevice); //cout<<nodenum<<endl; }; Bellmanor::Bellmanor():L(2,0),S(2,0),NF(2,0),nodeoff(2,0),leveloff(2,0),size(2,0) { }; __global__ void bellmanhigh(int *st,int *te,int *d,int *has,int *w,int E,int N,int size,int *m,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid],weight=w[eeid]; if(weight<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; //if(has[s+off]<round-1)return; if(d[s+off]+weight<d[t+off]) { d[t+off]=weight+d[s+off]; //has[t+off]=round; *m=1; } } __global__ void color(int *st,int *te,int *d,int *pre,int *has,int *w,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid],weight=w[eeid]; if(weight<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; //if(has[s+off]<round-1)return; if(d[s+off]+weight==d[t+off]) pre[t+off]=s+off; } vector<vector<int>> Bellmanor::routalg(int s,int t,int bw) { cout<<"inbellman"<<endl; int kk=1; time_t start,end; start=clock(); *m1=1; *m2=1; int round=1; cout<<"fuck wx!"<<endl; hipStream_t stream0,stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); int flag1=0,flag2=0; int cc=0; while(*m2==1||*m1==1) //for(int i=0;i<10;i++) { *m2=0,*m1=0; hipMemcpyAsync(dev_m2,m2,sizeof(int),hipMemcpyHostToDevice,stream1); hipLaunchKernelGGL(( bellmanhigh), dim3(size[1]/1024+1),dim3(1024),0,stream1, dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[1],dev_m2,round,leveloff[1],nodeoff[1],S[1],L[1]); hipMemcpyAsync(dev_m1,m1,sizeof(int),hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( bellmanhigh), dim3(size[0]/1024+1),dim3(1024),0,stream0, dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[0],dev_m2,round,leveloff[0],nodeoff[0],S[0],L[0]); hipLaunchKernelGGL(( color), dim3(size[1]/1024+1),dim3(1024),0,stream1, dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[1],round,leveloff[1],nodeoff[1],S[1],L[1]); hipMemcpyAsync(m2,dev_m2,sizeof(int),hipMemcpyDeviceToHost,stream1); hipLaunchKernelGGL(( color), dim3(size[0]/1024+1),dim3(1024),0,stream0, dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[0],round,leveloff[0],nodeoff[0],S[0],L[0]); hipMemcpyAsync(m1,dev_m1,sizeof(int),hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); hipStreamSynchronize(stream0); } hipMemcpy(d,dev_d,LY*YE*nodenum*sizeof(int),hipMemcpyDeviceToHost); /*for(int j=0;j<8;j++) {for(int i=0;i<nodenum;i++) cout<<d[i+j*nodenum]<<" "; cout<<endl; }*/ cout<<endl; end=clock(); cout<<"GPU time is : "<<end-start<<endl; cout<<"over!"<<endl; vector<vector<int>>result(LY,vector<int>()); hipFree(dev_te); hipFree(dev_st); hipFree(dev_d); hipFree(dev_w); cout<<"before return"<<endl; return result; };
28daa91b8cf8bdb77b7993f9ea63e66b58df6846.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include"pathalg.h" static const int WORK_SIZE =258; void Bellmanor::copydata(int s,vector<edge>&edges,int nodenum){ }; void Bellmanor::dellocate(){ }; void Bellmanor::allocate(int maxn,int maxedge){ } void Bellmanor::topsort() { cout<<" in top sort "<<endl; queue<int>zero; vector<int>order(nodenum*LY,-1); for(int i=0;i<nodenum*LY;i++) zero.push(i); int biao=0; while(!zero.empty()) { int node=zero.front(); zero.pop(); order[node]=biao++; for(int i=0;i<neibn[node].size();i++) { if((--ancestor[neibn[node][i]])==0) zero.push(neibn[node][i]); } } vector<pair<int,int>>tmp; for(int i=0;i<order.size();i++) tmp.push_back(make_pair(i,order[i])); for(int i=0;i<order.size();i++) ordernode.push_back(tmp[i].first); }; void Bellmanor::updatE(vector<int>esigns) { } void Bellmanor::updatS(vector<vector<pair<int,int>>>&stpair) { cout<<"inasd asd"<<endl; L[0]=LY1; L[1]=LY2; S[0]=stpair[0].size(); S[1]=stpair[1].size(); int count=0; ncount=L[0]*S[0]+L[1]*S[1]; for(int i=0;i<nodenum*ncount;i++) d[i]=INT_MAX/2,p[i]=-1; int woffid=0; for(int h=0;h<stpair.size();h++) { for(int k=0;k<L[h];k++) { for(int j=0;j<stpair[h].size();j++) { d[count*nodenum+stpair[h][j].first]=0; count++; } } } /*for(int j=0;j<4;j++) { for(int i=0;i<nodenum;i++) cout<<d[i+j*nodenum]<<" "; cout<<endl; }*/ cout<<"here it is "<<endl; for(int i=1;i<NF.size();i++) NF[i]=L[i-1]*S[i-1]; nodeoff[0]=0; nodeoff[1]=S[0]*L[0]*nodenum; leveloff[0]=0; leveloff[1]=L[0]*edges.size(); size[0]=edges.size()*L[0]*S[0]; size[1]=edges.size()*L[1]*S[1]; cout<<"asd"<<endl; cout<<"ncount is "<<count <<endl; cudaMemcpy(dev_d,d,ncount*nodenum*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_p,p,ncount*nodenum*sizeof(int),cudaMemcpyHostToDevice); cout<<"out!!!!"<<endl; } void Bellmanor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,vector<vector<int>>&relate,ginfo ginf) { cout<<"init bellmanor"<<endl; nodenum=ginf.pnodesize; edges=ext.first; vector<vector<int>>esigns; esigns=ext.second; stp=stpair; W=WD+1; st=new int[edges.size()*LY]; te=new int[edges.size()*LY]; d=new int[nodenum*LY*YE]; has=new int[nodenum*LY*YE]; p=new int[nodenum*LY*YE]; w=new int[edges.size()*LY]; m1=new int; m2=new int; *m1=0,*m2=0; esignes=new int[edges.size()*LY]; vector<vector<int>>nein(nodenum*LY,vector<int>()); neibn=nein; vector<vector<int>>neie(nodenum,vector<int>()); for(int i=0;i<edges.size();i++) { int s=edges[i].s; int t=edges[i].t; neibn[s].push_back(t); neie[s].push_back(i); } int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<neibn[i].size();j++) { st[count]=i; if(esigns[k][neie[i][j]]<0) te[count]=i; else te[count]=neibn[i][j]; count++; } int cc=0; for(int k=0;k<LY;k++) for(int i=0;i<edges.size();i++) w[cc++]=esigns[k][i]; cudaMalloc((void**)&dev_st,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_te,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_has,YE*LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_w,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_m1,sizeof(int)); cudaMalloc((void**)&dev_m2,sizeof(int)); if(dev_d==NULL) { printf("couldn't allocate %d int's.\n"); } cudaMemcpy(dev_te,te,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_st,st,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_w,w,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); 
cudaMemcpy(dev_has,has,YE*LY*nodenum*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_m1,m1,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_m2,m2,sizeof(int),cudaMemcpyHostToDevice); //cout<<nodenum<<endl; }; Bellmanor::Bellmanor():L(2,0),S(2,0),NF(2,0),nodeoff(2,0),leveloff(2,0),size(2,0) { }; __global__ void bellmanhigh(int *st,int *te,int *d,int *has,int *w,int E,int N,int size,int *m,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid],weight=w[eeid]; if(weight<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; //if(has[s+off]<round-1)return; if(d[s+off]+weight<d[t+off]) { d[t+off]=weight+d[s+off]; //has[t+off]=round; *m=1; } } __global__ void color(int *st,int *te,int *d,int *pre,int *has,int *w,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid],weight=w[eeid]; if(weight<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; //if(has[s+off]<round-1)return; if(d[s+off]+weight==d[t+off]) pre[t+off]=s+off; } vector<vector<int>> Bellmanor::routalg(int s,int t,int bw) { cout<<"inbellman"<<endl; int kk=1; time_t start,end; start=clock(); *m1=1; *m2=1; int round=1; cout<<"fuck wx!"<<endl; cudaStream_t stream0,stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); int flag1=0,flag2=0; int cc=0; while(*m2==1||*m1==1) //for(int i=0;i<10;i++) { *m2=0,*m1=0; cudaMemcpyAsync(dev_m2,m2,sizeof(int),cudaMemcpyHostToDevice,stream1); bellmanhigh<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[1],dev_m2,round,leveloff[1],nodeoff[1],S[1],L[1]); cudaMemcpyAsync(dev_m1,m1,sizeof(int),cudaMemcpyHostToDevice,stream0); bellmanhigh<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[0],dev_m2,round,leveloff[0],nodeoff[0],S[0],L[0]); color<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[1],round,leveloff[1],nodeoff[1],S[1],L[1]); cudaMemcpyAsync(m2,dev_m2,sizeof(int),cudaMemcpyDeviceToHost,stream1); color<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[0],round,leveloff[0],nodeoff[0],S[0],L[0]); cudaMemcpyAsync(m1,dev_m1,sizeof(int),cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream0); } cudaMemcpy(d,dev_d,LY*YE*nodenum*sizeof(int),cudaMemcpyDeviceToHost); /*for(int j=0;j<8;j++) {for(int i=0;i<nodenum;i++) cout<<d[i+j*nodenum]<<" "; cout<<endl; }*/ cout<<endl; end=clock(); cout<<"GPU time is : "<<end-start<<endl; cout<<"over!"<<endl; vector<vector<int>>result(LY,vector<int>()); cudaFree(dev_te); cudaFree(dev_st); cudaFree(dev_d); cudaFree(dev_w); cout<<"before return"<<endl; return result; };
a4e6e18c280e9ce0afa527c836d04c41320afe44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <catboost/cuda/gpu_data/gpu_structures.h> #include <catboost/cuda/cuda_util/kernel/compression.cuh> #include <catboost/cuda/cuda_util/kernel/compression_helper.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> namespace NKernel { struct TBinSplitLoader { const ui32* CompressedIndex; const ui32* Indices; ui32 Value; ui32 Mask; bool TakeEqual; __forceinline__ __device__ TBinSplitLoader(const ui32* index, const ui32* indices, const ui32 value, const ui32 mask, bool takeEqual) : CompressedIndex(index) , Indices(indices) , Value(value) , Mask(mask) , TakeEqual(takeEqual) { } __forceinline__ __device__ ui32 operator()(ui32 offset) { const ui32 idx = Indices ? Indices[offset] : offset; const ui32 featureVal = CompressedIndex[idx] & Mask; return static_cast<ui32>(TakeEqual ? (featureVal == Value) : featureVal > Value); } }; struct TFloatSplitLoader { const float* Values; const ui32* Indices; float Border; __device__ TFloatSplitLoader(const float* values, const ui32* indices, float border ) : Values(values) , Indices(indices) , Border(border) { } __forceinline__ __device__ ui32 operator()(ui32 offset) { ui32 idx = Indices ? Indices[offset] : offset; return static_cast<ui32>(Values[idx] > Border); } }; struct TBinUpdater { ui32* Bins; ui32 Depth; __forceinline__ __device__ TBinUpdater(ui32* bins, ui32 depth) : Bins(bins) , Depth(depth) { } __forceinline__ __device__ ui32 operator()(ui32 offset, ui32 bin) { return Bins[offset] |= bin << Depth; } }; template <int BLOCK_SIZE> __global__ void WriteCompressedSplitImpl(TCFeature feature, ui32 binIdx, const ui32* compressedIndex, const ui32* indices, int size, ui64* compressedBits) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); if (indices) { indices += helper.KeysPerBlock() * blockIdx.x; } else { compressedIndex += helper.KeysPerBlock() * blockIdx.x; } size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; compressedIndex += feature.Offset; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; TBinSplitLoader loader(compressedIndex, indices, value, mask, feature.OneHotFeature); helper.CompressBlock(loader, size, compressedBits); } template <int BLOCK_SIZE> __global__ void WriteCompressedSplitFloatImpl(const float* values, float border, const ui32* indices, int size, ui64* compressedBits) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); if (indices) { indices += helper.KeysPerBlock() * blockIdx.x; } else { values += helper.KeysPerBlock() * blockIdx.x; } size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; TFloatSplitLoader loader(values, indices, border); helper.CompressBlock(loader, size, compressedBits); } template <int BLOCK_SIZE> __global__ void UpdateBinsImpl(const ui64* compressedBits, ui32 depth, ui32* bins, int size) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); bins += helper.KeysPerBlock() * blockIdx.x; size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; TBinUpdater writer(bins, depth); helper.DecompressBlock(writer, compressedBits, size); } void WriteCompressedSplit(TCFeature feature, ui32 binIdx, const ui32* compressedIndex, const ui32* indices, int size, ui64* compressedBits, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { WriteCompressedSplitImpl<blockSize> << < 
numBlocks, blockSize, 0, stream >> >(feature, binIdx, compressedIndex, indices, size, compressedBits); } } void WriteCompressedSplitFloat(const float* values, float border, const ui32* indices, int size, ui64* compressedBits, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { WriteCompressedSplitFloatImpl<blockSize> << < numBlocks, blockSize, 0, stream >> >(values, border, indices, size, compressedBits); } } void UpdateBins(const ui64* compressedBits, ui32 depth, ui32* bins, int size, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { UpdateBinsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> >(compressedBits, depth, bins, size); } } __global__ void UpdateBinsFromCompressedIndexImpl(const ui32* compressedIndex, const ui32* indices, const int size, const TCFeature feature, const ui32 binIdx, const ui32 depth, ui32* bins) { compressedIndex += feature.Offset; int i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; while (i < size) { const ui32 idx = indices ? __ldg(indices + i) : i; const ui32 featureVal = __ldg(compressedIndex + idx) & mask; const ui32 split = (feature.OneHotFeature ? (featureVal == value) : featureVal > value); bins[i] |= split << depth; i += blockDim.x * gridDim.x; } } void UpdateBinsFromCompressedIndex(const ui32* compressedIndex, const ui32* indices, const int size, const TCFeature feature, const ui32 binIdx, const ui32 depth, ui32* bins, TCudaStream stream) { constexpr int blockSize = 256; const int numBlocks = min(CeilDivide(size, blockSize), TArchProps::MaxBlockCount()); if (numBlocks) { UpdateBinsFromCompressedIndexImpl << < numBlocks, blockSize, 0, stream >> >(compressedIndex, indices, size, feature, binIdx, depth, bins); } } }
a4e6e18c280e9ce0afa527c836d04c41320afe44.cu
#include <catboost/cuda/gpu_data/gpu_structures.h> #include <catboost/cuda/cuda_util/kernel/compression.cuh> #include <catboost/cuda/cuda_util/kernel/compression_helper.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> namespace NKernel { struct TBinSplitLoader { const ui32* CompressedIndex; const ui32* Indices; ui32 Value; ui32 Mask; bool TakeEqual; __forceinline__ __device__ TBinSplitLoader(const ui32* index, const ui32* indices, const ui32 value, const ui32 mask, bool takeEqual) : CompressedIndex(index) , Indices(indices) , Value(value) , Mask(mask) , TakeEqual(takeEqual) { } __forceinline__ __device__ ui32 operator()(ui32 offset) { const ui32 idx = Indices ? Indices[offset] : offset; const ui32 featureVal = CompressedIndex[idx] & Mask; return static_cast<ui32>(TakeEqual ? (featureVal == Value) : featureVal > Value); } }; struct TFloatSplitLoader { const float* Values; const ui32* Indices; float Border; __device__ TFloatSplitLoader(const float* values, const ui32* indices, float border ) : Values(values) , Indices(indices) , Border(border) { } __forceinline__ __device__ ui32 operator()(ui32 offset) { ui32 idx = Indices ? Indices[offset] : offset; return static_cast<ui32>(Values[idx] > Border); } }; struct TBinUpdater { ui32* Bins; ui32 Depth; __forceinline__ __device__ TBinUpdater(ui32* bins, ui32 depth) : Bins(bins) , Depth(depth) { } __forceinline__ __device__ ui32 operator()(ui32 offset, ui32 bin) { return Bins[offset] |= bin << Depth; } }; template <int BLOCK_SIZE> __global__ void WriteCompressedSplitImpl(TCFeature feature, ui32 binIdx, const ui32* compressedIndex, const ui32* indices, int size, ui64* compressedBits) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); if (indices) { indices += helper.KeysPerBlock() * blockIdx.x; } else { compressedIndex += helper.KeysPerBlock() * blockIdx.x; } size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; compressedIndex += feature.Offset; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; TBinSplitLoader loader(compressedIndex, indices, value, mask, feature.OneHotFeature); helper.CompressBlock(loader, size, compressedBits); } template <int BLOCK_SIZE> __global__ void WriteCompressedSplitFloatImpl(const float* values, float border, const ui32* indices, int size, ui64* compressedBits) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); if (indices) { indices += helper.KeysPerBlock() * blockIdx.x; } else { values += helper.KeysPerBlock() * blockIdx.x; } size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; TFloatSplitLoader loader(values, indices, border); helper.CompressBlock(loader, size, compressedBits); } template <int BLOCK_SIZE> __global__ void UpdateBinsImpl(const ui64* compressedBits, ui32 depth, ui32* bins, int size) { TCompressionHelper<ui64, BLOCK_SIZE> helper(1); bins += helper.KeysPerBlock() * blockIdx.x; size -= helper.KeysPerBlock() * blockIdx.x; compressedBits += BLOCK_SIZE * blockIdx.x; TBinUpdater writer(bins, depth); helper.DecompressBlock(writer, compressedBits, size); } void WriteCompressedSplit(TCFeature feature, ui32 binIdx, const ui32* compressedIndex, const ui32* indices, int size, ui64* compressedBits, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { WriteCompressedSplitImpl<blockSize> << < numBlocks, blockSize, 0, stream >> >(feature, binIdx, compressedIndex, indices, size, 
compressedBits); } } void WriteCompressedSplitFloat(const float* values, float border, const ui32* indices, int size, ui64* compressedBits, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { WriteCompressedSplitFloatImpl<blockSize> << < numBlocks, blockSize, 0, stream >> >(values, border, indices, size, compressedBits); } } void UpdateBins(const ui64* compressedBits, ui32 depth, ui32* bins, int size, TCudaStream stream) { constexpr int blockSize = CompressCudaBlockSize(); const int numBlocks = CeilDivide(size, TCompressionHelper<ui64, blockSize>(1).KeysPerBlock()); if (numBlocks) { UpdateBinsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> >(compressedBits, depth, bins, size); } } __global__ void UpdateBinsFromCompressedIndexImpl(const ui32* compressedIndex, const ui32* indices, const int size, const TCFeature feature, const ui32 binIdx, const ui32 depth, ui32* bins) { compressedIndex += feature.Offset; int i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; while (i < size) { const ui32 idx = indices ? __ldg(indices + i) : i; const ui32 featureVal = __ldg(compressedIndex + idx) & mask; const ui32 split = (feature.OneHotFeature ? (featureVal == value) : featureVal > value); bins[i] |= split << depth; i += blockDim.x * gridDim.x; } } void UpdateBinsFromCompressedIndex(const ui32* compressedIndex, const ui32* indices, const int size, const TCFeature feature, const ui32 binIdx, const ui32 depth, ui32* bins, TCudaStream stream) { constexpr int blockSize = 256; const int numBlocks = min(CeilDivide(size, blockSize), TArchProps::MaxBlockCount()); if (numBlocks) { UpdateBinsFromCompressedIndexImpl << < numBlocks, blockSize, 0, stream >> >(compressedIndex, indices, size, feature, binIdx, depth, bins); } } }
6ba348e82d20dfb75424fb13aff3097e2b22e800.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * wcsphColagrossiLandrini.cuh
 *
 * Author: Kamil Szewc ([email protected])
 * Modified on: 26-09-2014
 *
 */
#include <thrust/device_vector.h>
#include "../sph.h"
#include "wcsphStandard/wcsphStandard.cuh"
#include "general/calcNumberOfCells/calcNumberOfCells.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/renormalizePressure/renormalizePressure.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
#include "../errlog.h"

/**
 * The main loop of the WCSPH Standard model.
 */
void modelWcsphStandard(int NOB, int TPB,
    thrust::device_vector<Particle>& pVector, Particle *pSort,
    uint *gridParticleHash, uint *gridParticleIndex,
    uint *cellStart, uint *cellEnd,
    Parameters *par, Parameters *parHost, real time)
{
    STARTLOG("logs/models.log");

    static int step = 1;

    Particle* p = thrust::raw_pointer_cast(pVector.data());

    calcNumberOfCells(pVector, par, parHost);
    calcTimeStep(pVector, par, parHost);

    hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
    copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N);

    if ( (parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0) )
    {
        hipLaunchKernelGGL(( smoothingDensity) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
        HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
    }

    hipLaunchKernelGGL(( calcPressureWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureWS");

    if (parHost->T_RENORMALIZE_PRESSURE == 1)
    {
        renormalizePressure(NOB, TPB, pSort, par, parHost->N);
    }

    hipLaunchKernelGGL(( calcInteractionWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionWS");

    if (parHost->T_XSPH != 0)
    {
        hipLaunchKernelGGL(( calcXsphWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
        HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcXsphWS");
    }

    hipLaunchKernelGGL(( calcAdvectionWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, par, step*parHost->DT);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionWS");

    copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N);

    step++;
}
6ba348e82d20dfb75424fb13aff3097e2b22e800.cu
/*
 * wcsphColagrossiLandrini.cuh
 *
 * Author: Kamil Szewc ([email protected])
 * Modified on: 26-09-2014
 *
 */
#include <thrust/device_vector.h>
#include "../sph.h"
#include "wcsphStandard/wcsphStandard.cuh"
#include "general/calcNumberOfCells/calcNumberOfCells.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/renormalizePressure/renormalizePressure.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
#include "../errlog.h"

/**
 * The main loop of the WCSPH Standard model.
 */
void modelWcsphStandard(int NOB, int TPB,
    thrust::device_vector<Particle>& pVector, Particle *pSort,
    uint *gridParticleHash, uint *gridParticleIndex,
    uint *cellStart, uint *cellEnd,
    Parameters *par, Parameters *parHost, real time)
{
    STARTLOG("logs/models.log");

    static int step = 1;

    Particle* p = thrust::raw_pointer_cast(pVector.data());

    calcNumberOfCells(pVector, par, parHost);
    calcTimeStep(pVector, par, parHost);

    hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
    copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N);

    if ( (parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0) )
    {
        smoothingDensity <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
        HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
    }

    calcPressureWS <<<NOB, TPB>>>(pSort, par);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureWS");

    if (parHost->T_RENORMALIZE_PRESSURE == 1)
    {
        renormalizePressure(NOB, TPB, pSort, par, parHost->N);
    }

    calcInteractionWS <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionWS");

    if (parHost->T_XSPH != 0)
    {
        calcXsphWS <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
        HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcXsphWS");
    }

    calcAdvectionWS <<<NOB, TPB>>>(pSort, par, step*parHost->DT);
    HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionWS");

    copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N);

    step++;
}
f79fdc69e207b2a0b20b9b430093f8f772b2cd2d.hip
// !!! This is a file automatically generated by hipify!!! #include"multi_mm.h" void multi_mm( int* A_row_array, int* A_col_array, hipComplex* A_data_array, int* B_row_array, int* B_col_array, hipComplex* B_data_array, int m, int n, int min, int k, int l, hipsparseOperation_t transA, hipsparseOperation_t transB){ int nnzA=0,nnzB=0,nnzC=0,baseC=0; int* C_row_array; int* C_col_array; hipComplex* C_data_array; hipComplex* d_A_data_array; hipComplex* d_B_data_array; hipComplex* d_C_data_array; int* d_A_row_array; int* d_A_col_array; int* d_B_row_array; int* d_B_col_array; int* d_C_row_array; int* d_C_col_array; hipError_t stat1=hipSuccess; hipError_t stat2=hipSuccess; hipError_t stat3=hipSuccess; hipError_t stat4=hipSuccess; hipError_t stat5=hipSuccess; hipError_t stat6=hipSuccess; nnzA=A_row_array[m*k*l]-A_row_array[0]; nnzB=B_row_array[min*k*l]-B_row_array[0]; printf("%d\n%d %d %d %d %d\n",nnzA,m,n,k,l,nnzB); stat1=hipMalloc((void**)&d_A_row_array,sizeof(int)*(m*k*l+1)); printf("lihai"); for(int i=0;i<(((m<n)?m:n)*k*l+1);i++){ printf("%d\n",B_row_array[i]);} stat2=hipMalloc((void**)&d_A_col_array,sizeof(int)*nnzA); stat3=hipMalloc((void**)&d_A_data_array,sizeof(hipComplex)*nnzA); stat4=hipMalloc((void**)&d_B_row_array,sizeof(int)*(min*k*l+1)); stat5=hipMalloc((void**)&d_B_col_array,sizeof(int)*nnzB); stat6=hipMalloc((void**)&d_B_data_array,sizeof(hipComplex)*nnzB); if( stat1!=hipSuccess|| stat2!=hipSuccess|| stat3!=hipSuccess|| stat4!=hipSuccess|| stat5!=hipSuccess|| stat6!=hipSuccess){ printf("cuda malloc faild\n"); return; } if(hipMemcpy( d_A_row_array, A_row_array, sizeof(int)*(m*l*k+1), hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 1\n"); exit(-1); } if(hipMemcpy( d_A_col_array, A_col_array, sizeof(int)*nnzA, hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 2\n"); exit(-1); } if(hipMemcpy( d_A_data_array, A_data_array, sizeof(hipComplex)*nnzA, hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 3\n"); exit(-1); } if(hipMemcpy( d_B_row_array, B_row_array, sizeof(int)*(min*k*l+1), hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 4\n"); exit(-1); } if(hipMemcpy( d_B_col_array, B_col_array, sizeof(int)*nnzB, hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 5\n"); exit(-1); } if(hipMemcpy( d_B_data_array, B_data_array, sizeof(hipComplex)*nnzB, hipMemcpyHostToDevice)!=hipSuccess){ printf("cuda memcpy err 6\n"); exit(-1); } hipsparseHandle_t handle; if(hipsparseCreate(&handle)!=HIPSPARSE_STATUS_SUCCESS){ printf("cuaparsecreate handle failed\n"); return; } hipsparseMatDescr_t descrA; hipsparseMatDescr_t descrB; hipsparseMatDescr_t descrC; hipsparseStatus_t status=HIPSPARSE_STATUS_SUCCESS; status=hipsparseCreateMatDescr(&descrA); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseCreateMatDescr(&descrB); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseCreateMatDescr(&descrC); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseSetMatType(descrB,HIPSPARSE_MATRIX_TYPE_GENERAL); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseSetMatType(descrC,HIPSPARSE_MATRIX_TYPE_GENERAL); assert(status==HIPSPARSE_STATUS_SUCCESS); int* nnzTotalDevHostPtr=&nnzC; if(hipMalloc((void**)&d_C_row_array,sizeof(int)*(m*k*l+1))!=hipSuccess){ printf("cuda malloc error\n"); return; } if(hipsparseXcsrgemmNnz( handle, transA, transB, m*k*l, n*k*l, min*k*l, descrA, nnzA, d_A_row_array, d_A_col_array, descrB, nnzB, d_B_row_array, 
d_B_col_array, descrC, d_C_row_array, nnzTotalDevHostPtr )!=HIPSPARSE_STATUS_SUCCESS){ printf("gemmnz error\n"); exit(-1); } if(hipDeviceSynchronize()!=hipSuccess){ printf("synchronize error\n"); return; } if(NULL!=nnzTotalDevHostPtr){ nnzC=*nnzTotalDevHostPtr; } else{ hipMemcpy( &nnzC, d_C_row_array+m*k*l, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( &baseC, d_C_row_array, sizeof(int), hipMemcpyDeviceToHost); nnzC=-baseC; } C_row_array=(int*)malloc(sizeof(int)*(m*k*l+1)); C_col_array=(int*)malloc(sizeof(int)*nnzC); C_data_array=(hipComplex*)malloc(sizeof(hipComplex)*nnzC); if( !C_row_array|| !C_col_array|| !C_data_array){ printf("multi_mm malloc error"); } hipError_t status2=hipSuccess; status2=hipMalloc((void**)&d_C_col_array,sizeof(int)*nnzC); assert(status2==hipSuccess); status2=hipMalloc((void**)&d_C_data_array,sizeof(hipComplex)*nnzC); assert(status2==hipSuccess); if(hipsparseCcsrgemm( handle, transA, transB, m*k*l, n*k*l, min*k*l, descrA, nnzA, d_A_data_array, d_A_row_array, d_A_col_array, descrB, nnzB, d_B_data_array, d_B_row_array, d_B_col_array, descrC, d_C_data_array, d_C_row_array, d_C_col_array )!=HIPSPARSE_STATUS_SUCCESS){ printf("csrgemm error\n"); exit(-1); } status2=hipDeviceSynchronize(); assert(status2==hipSuccess); status2=hipMemcpy( C_row_array, d_C_row_array, sizeof(int)*(m*k*l+1), hipMemcpyDeviceToHost); assert(status2==hipSuccess); status2=hipMemcpy( C_col_array, d_C_col_array, sizeof(int)*nnzC, hipMemcpyDeviceToHost); assert(status2==hipSuccess); status2=hipMemcpy( C_data_array, d_C_data_array, sizeof(hipComplex)*nnzC, hipMemcpyDeviceToHost); assert(status2==hipSuccess); status=hipsparseDestroyMatDescr(descrA); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseDestroyMatDescr(descrB); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseDestroyMatDescr(descrC); assert(status==HIPSPARSE_STATUS_SUCCESS); status=hipsparseDestroy(handle); assert(status==HIPSPARSE_STATUS_SUCCESS); status2=hipFree(d_A_row_array); assert(status2==hipSuccess); status2=hipFree(d_A_col_array); assert(status2==hipSuccess); status2=hipFree(d_A_data_array); assert(status2==hipSuccess); status2=hipFree(d_B_row_array); assert(status2==hipSuccess); status2=hipFree(d_B_col_array); assert(status2==hipSuccess); status2=hipFree(d_B_data_array); assert(status2==hipSuccess); hipFree(d_C_col_array); hipFree(d_C_row_array); hipFree(d_C_data_array); }
f79fdc69e207b2a0b20b9b430093f8f772b2cd2d.cu
#include"multi_mm.h" void multi_mm( int* A_row_array, int* A_col_array, cuComplex* A_data_array, int* B_row_array, int* B_col_array, cuComplex* B_data_array, int m, int n, int min, int k, int l, cusparseOperation_t transA, cusparseOperation_t transB){ int nnzA=0,nnzB=0,nnzC=0,baseC=0; int* C_row_array; int* C_col_array; cuComplex* C_data_array; cuComplex* d_A_data_array; cuComplex* d_B_data_array; cuComplex* d_C_data_array; int* d_A_row_array; int* d_A_col_array; int* d_B_row_array; int* d_B_col_array; int* d_C_row_array; int* d_C_col_array; cudaError_t stat1=cudaSuccess; cudaError_t stat2=cudaSuccess; cudaError_t stat3=cudaSuccess; cudaError_t stat4=cudaSuccess; cudaError_t stat5=cudaSuccess; cudaError_t stat6=cudaSuccess; nnzA=A_row_array[m*k*l]-A_row_array[0]; nnzB=B_row_array[min*k*l]-B_row_array[0]; printf("%d\n%d %d %d %d %d\n",nnzA,m,n,k,l,nnzB); stat1=cudaMalloc((void**)&d_A_row_array,sizeof(int)*(m*k*l+1)); printf("lihai"); for(int i=0;i<(((m<n)?m:n)*k*l+1);i++){ printf("%d\n",B_row_array[i]);} stat2=cudaMalloc((void**)&d_A_col_array,sizeof(int)*nnzA); stat3=cudaMalloc((void**)&d_A_data_array,sizeof(cuComplex)*nnzA); stat4=cudaMalloc((void**)&d_B_row_array,sizeof(int)*(min*k*l+1)); stat5=cudaMalloc((void**)&d_B_col_array,sizeof(int)*nnzB); stat6=cudaMalloc((void**)&d_B_data_array,sizeof(cuComplex)*nnzB); if( stat1!=cudaSuccess|| stat2!=cudaSuccess|| stat3!=cudaSuccess|| stat4!=cudaSuccess|| stat5!=cudaSuccess|| stat6!=cudaSuccess){ printf("cuda malloc faild\n"); return; } if(cudaMemcpy( d_A_row_array, A_row_array, sizeof(int)*(m*l*k+1), cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 1\n"); exit(-1); } if(cudaMemcpy( d_A_col_array, A_col_array, sizeof(int)*nnzA, cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 2\n"); exit(-1); } if(cudaMemcpy( d_A_data_array, A_data_array, sizeof(cuComplex)*nnzA, cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 3\n"); exit(-1); } if(cudaMemcpy( d_B_row_array, B_row_array, sizeof(int)*(min*k*l+1), cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 4\n"); exit(-1); } if(cudaMemcpy( d_B_col_array, B_col_array, sizeof(int)*nnzB, cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 5\n"); exit(-1); } if(cudaMemcpy( d_B_data_array, B_data_array, sizeof(cuComplex)*nnzB, cudaMemcpyHostToDevice)!=cudaSuccess){ printf("cuda memcpy err 6\n"); exit(-1); } cusparseHandle_t handle; if(cusparseCreate(&handle)!=CUSPARSE_STATUS_SUCCESS){ printf("cuaparsecreate handle failed\n"); return; } cusparseMatDescr_t descrA; cusparseMatDescr_t descrB; cusparseMatDescr_t descrC; cusparseStatus_t status=CUSPARSE_STATUS_SUCCESS; status=cusparseCreateMatDescr(&descrA); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseCreateMatDescr(&descrB); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseCreateMatDescr(&descrC); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseSetMatType(descrB,CUSPARSE_MATRIX_TYPE_GENERAL); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseSetMatType(descrC,CUSPARSE_MATRIX_TYPE_GENERAL); assert(status==CUSPARSE_STATUS_SUCCESS); int* nnzTotalDevHostPtr=&nnzC; if(cudaMalloc((void**)&d_C_row_array,sizeof(int)*(m*k*l+1))!=cudaSuccess){ printf("cuda malloc error\n"); return; } if(cusparseXcsrgemmNnz( handle, transA, transB, m*k*l, n*k*l, min*k*l, descrA, nnzA, d_A_row_array, d_A_col_array, descrB, nnzB, d_B_row_array, d_B_col_array, descrC, d_C_row_array, 
nnzTotalDevHostPtr )!=CUSPARSE_STATUS_SUCCESS){ printf("gemmnz error\n"); exit(-1); } if(cudaDeviceSynchronize()!=cudaSuccess){ printf("synchronize error\n"); return; } if(NULL!=nnzTotalDevHostPtr){ nnzC=*nnzTotalDevHostPtr; } else{ cudaMemcpy( &nnzC, d_C_row_array+m*k*l, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( &baseC, d_C_row_array, sizeof(int), cudaMemcpyDeviceToHost); nnzC=-baseC; } C_row_array=(int*)malloc(sizeof(int)*(m*k*l+1)); C_col_array=(int*)malloc(sizeof(int)*nnzC); C_data_array=(cuComplex*)malloc(sizeof(cuComplex)*nnzC); if( !C_row_array|| !C_col_array|| !C_data_array){ printf("multi_mm malloc error"); } cudaError_t status2=cudaSuccess; status2=cudaMalloc((void**)&d_C_col_array,sizeof(int)*nnzC); assert(status2==cudaSuccess); status2=cudaMalloc((void**)&d_C_data_array,sizeof(cuComplex)*nnzC); assert(status2==cudaSuccess); if(cusparseCcsrgemm( handle, transA, transB, m*k*l, n*k*l, min*k*l, descrA, nnzA, d_A_data_array, d_A_row_array, d_A_col_array, descrB, nnzB, d_B_data_array, d_B_row_array, d_B_col_array, descrC, d_C_data_array, d_C_row_array, d_C_col_array )!=CUSPARSE_STATUS_SUCCESS){ printf("csrgemm error\n"); exit(-1); } status2=cudaDeviceSynchronize(); assert(status2==cudaSuccess); status2=cudaMemcpy( C_row_array, d_C_row_array, sizeof(int)*(m*k*l+1), cudaMemcpyDeviceToHost); assert(status2==cudaSuccess); status2=cudaMemcpy( C_col_array, d_C_col_array, sizeof(int)*nnzC, cudaMemcpyDeviceToHost); assert(status2==cudaSuccess); status2=cudaMemcpy( C_data_array, d_C_data_array, sizeof(cuComplex)*nnzC, cudaMemcpyDeviceToHost); assert(status2==cudaSuccess); status=cusparseDestroyMatDescr(descrA); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseDestroyMatDescr(descrB); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseDestroyMatDescr(descrC); assert(status==CUSPARSE_STATUS_SUCCESS); status=cusparseDestroy(handle); assert(status==CUSPARSE_STATUS_SUCCESS); status2=cudaFree(d_A_row_array); assert(status2==cudaSuccess); status2=cudaFree(d_A_col_array); assert(status2==cudaSuccess); status2=cudaFree(d_A_data_array); assert(status2==cudaSuccess); status2=cudaFree(d_B_row_array); assert(status2==cudaSuccess); status2=cudaFree(d_B_col_array); assert(status2==cudaSuccess); status2=cudaFree(d_B_data_array); assert(status2==cudaSuccess); cudaFree(d_C_col_array); cudaFree(d_C_row_array); cudaFree(d_C_data_array); }
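For reference, the fallback branch above assigns `nnzC=-baseC`, which discards the element count read back from the device. A minimal sketch of the usual two-read fallback after cusparseXcsrgemmNnz (variable names mirror the function above; this is not part of the original file):

// Sketch only: recovering nnzC when cusparseXcsrgemmNnz does not return it on the host.
int nnzC = 0, baseC = 0;
if (nnzTotalDevHostPtr != NULL) {
    nnzC = *nnzTotalDevHostPtr;
} else {
    cudaMemcpy(&nnzC,  d_C_row_array + m*k*l, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&baseC, d_C_row_array,         sizeof(int), cudaMemcpyDeviceToHost);
    nnzC -= baseC;   // subtract the base index; "nnzC = -baseC" loses the count
}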
d12fa141b2605b1a0fed357940a7f970cd169dbc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main() {
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    int dev;
    for (dev = 0; dev < deviceCount; dev++) {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            // major/minor of 9999 means no usable device was found
            if (deviceProp.major == 9999 && deviceProp.minor == 9999)
                printf("\n");
        }
        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
        printf("Total amount of global memory:                 %zu bytes\n", deviceProp.totalGlobalMem);
        printf("Number of multiprocessors:                     %d\n", deviceProp.multiProcessorCount);
        printf("Total amount of constant memory:               %zu bytes\n", deviceProp.totalConstMem);
        printf("Total amount of shared memory per block:       %zu bytes\n", deviceProp.sharedMemPerBlock);
        printf("Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
        printf("Warp size:                                     %d\n", deviceProp.warpSize);
        printf("Maximum number of threads per block:           %d\n", deviceProp.maxThreadsPerBlock);
        printf("Maximum sizes of each dimension of a block:    %d x %d x %d\n",
               deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
        printf("Maximum size of each dimension of a grid:      %d x %d x %d\n",
               deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
        printf("Maximum memory pitch:                          %zu bytes\n", deviceProp.memPitch);
        printf("Texture alignment:                             %zu bytes\n", deviceProp.texturePitchAlignment);
        printf("Clock rate:                                    %.2f GHz\n", deviceProp.clockRate * 1e-6f);
    }
    printf("\nTest PASSED\n");
    getchar();
    return 0;
}
d12fa141b2605b1a0fed357940a7f970cd169dbc.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<stdlib.h> #include<string.h> int main() { int deviceCount; cudaGetDeviceCount(&deviceCount); int dev; for (dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (/*deviceProp.major==9999 && */deviceProp.minor = 9999&&deviceProp.major==9999) printf("\n"); } printf("\nDevice%d:\"%s\"\n", dev, deviceProp.name); printf("Total amount of global memory %u bytes\n", deviceProp.totalGlobalMem); printf("Number of mltiprocessors %d\n", deviceProp.multiProcessorCount); printf("Total amount of constant memory: %u bytes\n", deviceProp.totalConstMem); printf("Total amount of shared memory per block %u bytes\n", deviceProp.sharedMemPerBlock); printf("Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf("Warp size %d\n", deviceProp.warpSize); printf("Maximum number of threada per block: %d\n", deviceProp.maxThreadsPerBlock); printf("Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf("Maximum size of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf("Maximum memory pitch : %u bytes\n", deviceProp.memPitch); printf("Texture alignmemt %u bytes\n", deviceProp.texturePitchAlignment); printf("Clock rate %.2f GHz\n", deviceProp.clockRate*1e-6f); } printf("\nTest PASSED\n"); getchar(); }
baed6cef65408118bd65acdf5086120e8c12c929.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <list> #include <vector> #include <iostream> using namespace std; #define N 10 int main() { thrust::device_vector<int> D(N, 1); thrust::fill(D.begin(), D.begin() + 7, 9); // 9 9 9 9 9 9 9 1 1 1 thrust::host_vector<int> H(D.begin(), D.begin() + 5); // 9 9 9 9 9 thrust::sequence(H.begin(), H.end()); // 0 1 2 3 4 // copy all H to D thrust::copy(H.begin(), H.end(), D.begin()); // can copy back to H H.resize(N); thrust::copy(D.begin(), D.end(), H.begin()); cout << "H: "; thrust::copy(H.begin(), H.end(), ostream_iterator<int>(cout, " ")); cout << endl; cout << "D is: "; thrust::copy(D.begin(), D.end(), ostream_iterator<int>(cout, " ")); cout << endl; // OK to use regular vector too vector<int> regular(N); thrust::copy(D.begin(), D.end(), regular.begin()); cout << "Regular vec is: "; thrust::copy(regular.begin(), regular.end(), ostream_iterator<int>(cout, " ")); cout << endl; /////////////////////////////////////////////////////////////////// // how to use raw pointer to device mem int *ptr; hipMalloc((void **) &ptr, N * sizeof(int)); thrust::device_ptr<int> dev_ptr(ptr); //then we can do this thrust::fill(dev_ptr, dev_ptr + N, (int)99); cout << "Raw ptr is: "; for(auto i = dev_ptr; i != dev_ptr + N; ++i) cout << *i << " "; cout << endl; // how to extract raw pointer from device pointer (then what?) thrust::device_ptr<int> dev_ptr2 = thrust::device_malloc<int> (N); int *ptr2 = thrust::raw_pointer_cast(dev_ptr2); int *ptr3 = thrust::raw_pointer_cast(dev_ptr); // segfault below /* cout << "Casted ptr is: "; for(auto i = ptr2; i != ptr2 + N; ++i) cout << *i << " "; cout << endl; */ ///////////////////////////////////////////////////////////////// // however using iterator (compared to pointer) is better choice for traversing list<int> lst; lst.push_back(10); lst.push_back(20); lst.push_back(30); lst.push_back(40); thrust::device_vector<int> DD(lst.begin(), lst.end()); cout << "DD is: "; thrust::copy(DD.begin(), DD.end(), ostream_iterator<int>(cout, " ")); cout << endl; return 0; }
baed6cef65408118bd65acdf5086120e8c12c929.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <list> #include <vector> #include <iostream> using namespace std; #define N 10 int main() { thrust::device_vector<int> D(N, 1); thrust::fill(D.begin(), D.begin() + 7, 9); // 9 9 9 9 9 9 9 1 1 1 thrust::host_vector<int> H(D.begin(), D.begin() + 5); // 9 9 9 9 9 thrust::sequence(H.begin(), H.end()); // 0 1 2 3 4 // copy all H to D thrust::copy(H.begin(), H.end(), D.begin()); // can copy back to H H.resize(N); thrust::copy(D.begin(), D.end(), H.begin()); cout << "H: "; thrust::copy(H.begin(), H.end(), ostream_iterator<int>(cout, " ")); cout << endl; cout << "D is: "; thrust::copy(D.begin(), D.end(), ostream_iterator<int>(cout, " ")); cout << endl; // OK to use regular vector too vector<int> regular(N); thrust::copy(D.begin(), D.end(), regular.begin()); cout << "Regular vec is: "; thrust::copy(regular.begin(), regular.end(), ostream_iterator<int>(cout, " ")); cout << endl; /////////////////////////////////////////////////////////////////// // how to use raw pointer to device mem int *ptr; cudaMalloc((void **) &ptr, N * sizeof(int)); thrust::device_ptr<int> dev_ptr(ptr); //then we can do this thrust::fill(dev_ptr, dev_ptr + N, (int)99); cout << "Raw ptr is: "; for(auto i = dev_ptr; i != dev_ptr + N; ++i) cout << *i << " "; cout << endl; // how to extract raw pointer from device pointer (then what?) thrust::device_ptr<int> dev_ptr2 = thrust::device_malloc<int> (N); int *ptr2 = thrust::raw_pointer_cast(dev_ptr2); int *ptr3 = thrust::raw_pointer_cast(dev_ptr); // segfault below /* cout << "Casted ptr is: "; for(auto i = ptr2; i != ptr2 + N; ++i) cout << *i << " "; cout << endl; */ ///////////////////////////////////////////////////////////////// // however using iterator (compared to pointer) is better choice for traversing list<int> lst; lst.push_back(10); lst.push_back(20); lst.push_back(30); lst.push_back(40); thrust::device_vector<int> DD(lst.begin(), lst.end()); cout << "DD is: "; thrust::copy(DD.begin(), DD.end(), ostream_iterator<int>(cout, " ")); cout << endl; return 0; }
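The commented-out loop over ptr2 in the listing above segfaults because a pointer obtained from thrust::raw_pointer_cast is a plain device address and cannot be dereferenced on the host. A hedged sketch of how the same data could be inspected, continuing main() above with its names (host_copy is introduced here; requires <vector> and <thrust/device_free.h>):

// Sketch only: copy the device buffer back before printing, then release
// the allocations the example leaves live.
std::vector<int> host_copy(N);
cudaMemcpy(host_copy.data(), ptr3, N * sizeof(int), cudaMemcpyDeviceToHost);
cout << "Casted ptr is: ";
for (int v : host_copy) cout << v << " ";
cout << endl;

thrust::device_free(dev_ptr2);  // pairs with thrust::device_malloc
cudaFree(ptr);                  // pairs with the earlier cudaMalloc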
608cf88daa5c897f5f69373974ebfe30f794ef73.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> int n = 200; using namespace std; __device__ float generate( hiprandState_t* globalState, int ind ) { //int ind = threadIdx.x; hiprandState_t localState = globalState[ind]; float RANDOM = hiprand_uniform( &localState ); globalState[ind] = localState; return RANDOM; } __global__ void setup_kernel ( hiprandState_t * state, unsigned long seed ) { int id = threadIdx.x; hiprand_init ( seed, id, 0, &state[id] ); } __global__ void kernel(float* N, hiprandState_t* globalState, int n) { // generate random numbers for(int i=0;i<40;i++) { int k = generate(globalState, i) * 10; /* while(k > n*n-1) { k-=(n*n-1); } */ N[i] = k; } } int main() { int N=40; hiprandState_t* devStates; hipMalloc ( &devStates, N*sizeof( hiprandState_t ) ); // setup seeds hipLaunchKernelGGL(( setup_kernel) , dim3(1), dim3(N) , 0, 0, devStates,unsigned(time(NULL)) ); float N2[40]; float* N3; hipMalloc((void**) &N3, sizeof(float)*N); hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, N3, devStates, n); hipMemcpy(N2, N3, sizeof(float)*N, hipMemcpyDeviceToHost); for(int i=0;i<N;i++) { cout<<N2[i]<<endl; } return 0; }
608cf88daa5c897f5f69373974ebfe30f794ef73.cu
#include <iostream> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> int n = 200; using namespace std; __device__ float generate( curandState* globalState, int ind ) { //int ind = threadIdx.x; curandState localState = globalState[ind]; float RANDOM = curand_uniform( &localState ); globalState[ind] = localState; return RANDOM; } __global__ void setup_kernel ( curandState * state, unsigned long seed ) { int id = threadIdx.x; curand_init ( seed, id, 0, &state[id] ); } __global__ void kernel(float* N, curandState* globalState, int n) { // generate random numbers for(int i=0;i<40;i++) { int k = generate(globalState, i) * 10; /* while(k > n*n-1) { k-=(n*n-1); } */ N[i] = k; } } int main() { int N=40; curandState* devStates; cudaMalloc ( &devStates, N*sizeof( curandState ) ); // setup seeds setup_kernel <<< 1, N >>> ( devStates,unsigned(time(NULL)) ); float N2[40]; float* N3; cudaMalloc((void**) &N3, sizeof(float)*N); kernel<<<1,1>>> (N3, devStates, n); cudaMemcpy(N2, N3, sizeof(float)*N, cudaMemcpyDeviceToHost); for(int i=0;i<N;i++) { cout<<N2[i]<<endl; } return 0; }
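The kernel above is launched with <<<1,1>>> and fills all 40 outputs serially from a single thread, even though setup_kernel initialized one curandState per thread. A hedged sketch of a per-thread variant (kernel_parallel is a name introduced here, not part of the original file):

// Sketch only: one thread per output element, each consuming its own state.
__global__ void kernel_parallel(float* out, curandState* globalState, int count) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < count) {
        curandState localState = globalState[tid];
        out[tid] = (int)(curand_uniform(&localState) * 10);  // same scaling as the original
        globalState[tid] = localState;
    }
}

// Launched to match the setup_kernel configuration:
//   kernel_parallel<<<1, N>>>(N3, devStates, N);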
8df577e01e915f732800f0cac07ce0d905500938.hip
// !!! This is a file automatically generated by hipify!!! #include "fft.h" #include "hipfft.h" #include "utils.h" #include "perm_filter.h" extern "C" { #include "timer.h" } #include "hip/hip_complex.h" #include <hip/hip_runtime.h> __global__ void PermFilterKernel(hipDoubleComplex* d_origx, hipDoubleComplex* d_filter, int* d_permute, hipDoubleComplex* d_x_sampt, int B, int n, int loops, int round) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < loops*B) { int i_mod_B = i & (B-1); int off = i_mod_B * round; int tmp = i/B; int ai = d_permute[tmp]; hipDoubleComplex tmp_value1, tmp_value2; for(int j=0; j<round; j++){ int index = (i_mod_B + B*j)*ai &(n-1); tmp_value1 = cuCmul(d_origx[index],d_filter[off+j]); tmp_value2 = cuCadd(tmp_value1, tmp_value2); } d_x_sampt[i] = tmp_value2; } } extern "C" void inner_loop_step_a_plus_b(complex_t *origx, Filter *filter, complex_t *x_sampt, int*permute, int n, int B, int loops, double *PF_ALL, double *B_ALL, float *DtoH, float *HtoD, unsigned int plan1) { int filter_size = filter->sizet; int round = filter_size/B; complex_t *d_origx, *d_filter, *d_x_sampt; int *d_permute; hipfftHandle plan = (hipfftHandle)plan1; hipfftResult err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //Start of timing on HtoD hipEventRecord(start); //Allocate device memory and copy to device hipMalloc((void**)&d_origx, n*sizeof(complex_t)); hipMemcpy(d_origx, origx, n*sizeof(complex_t), hipMemcpyHostToDevice); hipMalloc((void**)&d_filter, filter_size*sizeof(complex_t)); hipMemcpy(d_filter, filter->time, filter_size*sizeof(complex_t), hipMemcpyHostToDevice); hipMalloc((void**)&d_permute, loops*sizeof(int)); hipMemcpy(d_permute, permute, loops*sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&d_x_sampt, loops*B*sizeof(complex_t)); //End of timing on HtoD hipEventRecord(stop); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time,start, stop); *HtoD = (float)time/1e3; //Start of the kernels //Start of timing on kernels hipEventRecord(start); dim3 dimBlock(512); dim3 dimGrid(loops*B/dimBlock.x); hipLaunchKernelGGL(( PermFilterKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, (hipDoubleComplex*)d_origx, (hipDoubleComplex*)d_filter, d_permute, (hipDoubleComplex*)d_x_sampt, B, n,loops,round); //End of timing on kernerls hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&time,start, stop); *PF_ALL = (float)time/1e3; //Step B -- cuFFT of B-dimensional FFT double DDD = get_time(); err = hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_x_sampt, (hipfftDoubleComplex *)d_x_sampt, HIPFFT_FORWARD); if (err != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err); exit(-1); } *B_ALL = get_time() - DDD; //Transfer back the d_x_sampt in freq domain //Start of timing on DtoH hipEventRecord(start); hipMemcpy(x_sampt, d_x_sampt, loops*B*sizeof(complex_t), hipMemcpyDeviceToHost); //End of timing on DtoH hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&time,start, stop); *DtoH = (float)time/1e3; //destroy plan and device memory hipfftDestroy(plan); hipFree(d_origx); hipFree(d_filter); hipFree(d_x_sampt); hipFree(d_permute); hipEventDestroy(start); hipEventDestroy(stop); }
8df577e01e915f732800f0cac07ce0d905500938.cu
#include "fft.h" #include "cufft.h" #include "utils.h" #include "perm_filter.h" extern "C" { #include "timer.h" } #include "cuComplex.h" #include <cuda.h> __global__ void PermFilterKernel(cuDoubleComplex* d_origx, cuDoubleComplex* d_filter, int* d_permute, cuDoubleComplex* d_x_sampt, int B, int n, int loops, int round) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < loops*B) { int i_mod_B = i & (B-1); int off = i_mod_B * round; int tmp = i/B; int ai = d_permute[tmp]; cuDoubleComplex tmp_value1, tmp_value2; for(int j=0; j<round; j++){ int index = (i_mod_B + B*j)*ai &(n-1); tmp_value1 = cuCmul(d_origx[index],d_filter[off+j]); tmp_value2 = cuCadd(tmp_value1, tmp_value2); } d_x_sampt[i] = tmp_value2; } } extern "C" void inner_loop_step_a_plus_b(complex_t *origx, Filter *filter, complex_t *x_sampt, int*permute, int n, int B, int loops, double *PF_ALL, double *B_ALL, float *DtoH, float *HtoD, unsigned int plan1) { int filter_size = filter->sizet; int round = filter_size/B; complex_t *d_origx, *d_filter, *d_x_sampt; int *d_permute; cufftHandle plan = (cufftHandle)plan1; cufftResult err; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //Start of timing on HtoD cudaEventRecord(start); //Allocate device memory and copy to device cudaMalloc((void**)&d_origx, n*sizeof(complex_t)); cudaMemcpy(d_origx, origx, n*sizeof(complex_t), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_filter, filter_size*sizeof(complex_t)); cudaMemcpy(d_filter, filter->time, filter_size*sizeof(complex_t), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_permute, loops*sizeof(int)); cudaMemcpy(d_permute, permute, loops*sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_x_sampt, loops*B*sizeof(complex_t)); //End of timing on HtoD cudaEventRecord(stop); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time,start, stop); *HtoD = (float)time/1e3; //Start of the kernels //Start of timing on kernels cudaEventRecord(start); dim3 dimBlock(512); dim3 dimGrid(loops*B/dimBlock.x); PermFilterKernel<<<dimGrid, dimBlock>>>((cuDoubleComplex*)d_origx, (cuDoubleComplex*)d_filter, d_permute, (cuDoubleComplex*)d_x_sampt, B, n,loops,round); //End of timing on kernerls cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start, stop); *PF_ALL = (float)time/1e3; //Step B -- cuFFT of B-dimensional FFT double DDD = get_time(); err = cufftExecZ2Z(plan, (cufftDoubleComplex *)d_x_sampt, (cufftDoubleComplex *)d_x_sampt, CUFFT_FORWARD); if (err != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err); exit(-1); } *B_ALL = get_time() - DDD; //Transfer back the d_x_sampt in freq domain //Start of timing on DtoH cudaEventRecord(start); cudaMemcpy(x_sampt, d_x_sampt, loops*B*sizeof(complex_t), cudaMemcpyDeviceToHost); //End of timing on DtoH cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start, stop); *DtoH = (float)time/1e3; //destroy plan and device memory cufftDestroy(plan); cudaFree(d_origx); cudaFree(d_filter); cudaFree(d_x_sampt); cudaFree(d_permute); cudaEventDestroy(start); cudaEventDestroy(stop); }
643af5caae09d277c1e3989d676f9aa5eed4127b.hip
// !!! This is a file automatically generated by hipify!!! /* struct sparse_csr_weighted{ n_nodes *number_of_nodes; double *confidence; double *influence; int *col_index; int *row_ptr; }; */ // __host__ network_in_device cp_to_device(const sparse_csr_weighted &csr_info, const network_info &h_nw_info){ network_in_device nw_device; sparse_csr_weighted& device_initial_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; const n_nodes num_nodes = *csr_info.number_of_nodes; const uint8_t n_links = csr_info.row_ptr[num_nodes]; const uint8_t& t_length = *h_nw_info.time_length; hipMalloc((void**) &(device_initial_info.number_of_nodes), sizeof(n_nodes)); hipMemcpy(device_initial_info.number_of_nodes, csr_info.number_of_nodes, sizeof(n_nodes), hipMemcpyHostToDevice); hipMalloc((void**) &(device_initial_info.confidence), num_nodes * sizeof(double)); hipMemcpy(device_initial_info.confidence, csr_info.confidence, num_nodes * sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &(device_initial_info.influence), n_links * sizeof(double)); hipMemcpy(device_initial_info.influence, csr_info.influence, n_links * sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &(device_initial_info.col_index), n_links * sizeof(int)); hipMemcpy(device_initial_info.col_index, csr_info.col_index, n_links * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**) &(device_initial_info.row_ptr), (num_nodes + 1) * sizeof(int)); hipMemcpy(device_initial_info.row_ptr, csr_info.row_ptr, (num_nodes + 1) * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**) &(nw_info.nodes_types), num_nodes * sizeof(node_type)); hipMemcpy(nw_info.nodes_types, h_nw_info.nodes_types, num_nodes * sizeof(node_type), hipMemcpyHostToDevice); hipMalloc((void**) &(nw_info.time_length), sizeof(uint8_t)); hipMemcpy(nw_info.time_length, &t_length, sizeof(uint8_t), hipMemcpyHostToDevice); hipMalloc((void**) &(nw_info.p_threshold), sizeof(double)); hipMemcpy(nw_info.p_threshold, h_nw_info.p_threshold, sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &(nw_info.n_threshold), sizeof(double)); hipMemcpy(nw_info.n_threshold, h_nw_info.n_threshold, sizeof(double), hipMemcpyHostToDevice); // accrued evidence is num_nodes * time_length hipMalloc((void**) &(sim_ptr.evidence), num_nodes * t_length * sizeof(double)); // think i'd rather use local memory hipMalloc((void**) &(sim_ptr.activated_positive), num_nodes * t_length * sizeof(int)); hipMalloc((void**) &(sim_ptr.activated_negative), num_nodes * t_length * sizeof(int)); hipMalloc((void**) &(sim_ptr.total_activated_positive), num_nodes * sizeof(int)); hipMalloc((void**) &(sim_ptr.total_activated_negative), num_nodes * sizeof(int)); return nw_device; } void clean_device_memory(network_in_device &nw_device){ sparse_csr_weighted& device_initial_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; hipFree(device_initial_info.number_of_nodes); hipFree(device_initial_info.confidence); hipFree(device_initial_info.influence); hipFree(device_initial_info.col_index); hipFree(device_initial_info.row_ptr); hipFree(nw_info.nodes_types); hipFree(nw_info.time_length); hipFree(nw_info.p_threshold); hipFree(nw_info.n_threshold); hipFree(sim_ptr.evidence); hipFree(sim_ptr.activated_positive); hipFree(sim_ptr.activated_negative); return; }
643af5caae09d277c1e3989d676f9aa5eed4127b.cu
/* struct sparse_csr_weighted{ n_nodes *number_of_nodes; double *confidence; double *influence; int *col_index; int *row_ptr; }; */ // __host__ network_in_device cp_to_device(const sparse_csr_weighted &csr_info, const network_info &h_nw_info){ network_in_device nw_device; sparse_csr_weighted& device_initial_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; const n_nodes num_nodes = *csr_info.number_of_nodes; const uint8_t n_links = csr_info.row_ptr[num_nodes]; const uint8_t& t_length = *h_nw_info.time_length; cudaMalloc((void**) &(device_initial_info.number_of_nodes), sizeof(n_nodes)); cudaMemcpy(device_initial_info.number_of_nodes, csr_info.number_of_nodes, sizeof(n_nodes), cudaMemcpyHostToDevice); cudaMalloc((void**) &(device_initial_info.confidence), num_nodes * sizeof(double)); cudaMemcpy(device_initial_info.confidence, csr_info.confidence, num_nodes * sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &(device_initial_info.influence), n_links * sizeof(double)); cudaMemcpy(device_initial_info.influence, csr_info.influence, n_links * sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &(device_initial_info.col_index), n_links * sizeof(int)); cudaMemcpy(device_initial_info.col_index, csr_info.col_index, n_links * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**) &(device_initial_info.row_ptr), (num_nodes + 1) * sizeof(int)); cudaMemcpy(device_initial_info.row_ptr, csr_info.row_ptr, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**) &(nw_info.nodes_types), num_nodes * sizeof(node_type)); cudaMemcpy(nw_info.nodes_types, h_nw_info.nodes_types, num_nodes * sizeof(node_type), cudaMemcpyHostToDevice); cudaMalloc((void**) &(nw_info.time_length), sizeof(uint8_t)); cudaMemcpy(nw_info.time_length, &t_length, sizeof(uint8_t), cudaMemcpyHostToDevice); cudaMalloc((void**) &(nw_info.p_threshold), sizeof(double)); cudaMemcpy(nw_info.p_threshold, h_nw_info.p_threshold, sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &(nw_info.n_threshold), sizeof(double)); cudaMemcpy(nw_info.n_threshold, h_nw_info.n_threshold, sizeof(double), cudaMemcpyHostToDevice); // accrued evidence is num_nodes * time_length cudaMalloc((void**) &(sim_ptr.evidence), num_nodes * t_length * sizeof(double)); // think i'd rather use local memory cudaMalloc((void**) &(sim_ptr.activated_positive), num_nodes * t_length * sizeof(int)); cudaMalloc((void**) &(sim_ptr.activated_negative), num_nodes * t_length * sizeof(int)); cudaMalloc((void**) &(sim_ptr.total_activated_positive), num_nodes * sizeof(int)); cudaMalloc((void**) &(sim_ptr.total_activated_negative), num_nodes * sizeof(int)); return nw_device; } void clean_device_memory(network_in_device &nw_device){ sparse_csr_weighted& device_initial_info = nw_device.csr_info; network_info& nw_info = nw_device.nw_info; simulation_single& sim_ptr = nw_device.sim_ptr; cudaFree(device_initial_info.number_of_nodes); cudaFree(device_initial_info.confidence); cudaFree(device_initial_info.influence); cudaFree(device_initial_info.col_index); cudaFree(device_initial_info.row_ptr); cudaFree(nw_info.nodes_types); cudaFree(nw_info.time_length); cudaFree(nw_info.p_threshold); cudaFree(nw_info.n_threshold); cudaFree(sim_ptr.evidence); cudaFree(sim_ptr.activated_positive); cudaFree(sim_ptr.activated_negative); return; }
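cp_to_device above repeats the same cudaMalloc/cudaMemcpy pair for every field without checking return codes. A sketch of how that repetition could be factored out; alloc_and_copy is a hypothetical helper name, not part of the original sources:

// Sketch only: allocate a device buffer, copy host data into it, abort on failure.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

template <typename T>
T* alloc_and_copy(const T* host_src, size_t count) {
    T* dev_ptr = nullptr;
    if (cudaMalloc(&dev_ptr, count * sizeof(T)) != cudaSuccess ||
        cudaMemcpy(dev_ptr, host_src, count * sizeof(T),
                   cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "device allocation/copy failed\n");
        exit(EXIT_FAILURE);
    }
    return dev_ptr;
}

// e.g. device_initial_info.confidence = alloc_and_copy(csr_info.confidence, num_nodes);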
1280d3f56d467c43f9f9d2be2d33ccffc7d7d356.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/equal.h> #include <torch/extension.h> #include <hipcub/hipcub.hpp> #include <vector> #include <ATen/Context.h> #include <THH/THH.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <THH/THHThrustAllocator.cuh> #define PI 3.141592653589793 using torch::Tensor; using torch::autograd::tensor_list; template <typename DataT, typename IndexT = int> struct AEVScalarParams { DataT Rcr; DataT Rca; IndexT radial_sublength; IndexT radial_length; IndexT angular_sublength; IndexT angular_length; IndexT num_species; AEVScalarParams() = default; AEVScalarParams(const torch::IValue& aev_params_ivalue) { c10::intrusive_ptr<c10::ivalue::Tuple> aev_params_tuple_ptr = aev_params_ivalue.toTuple(); auto aev_params_tuple = aev_params_tuple_ptr->elements(); Rcr = static_cast<DataT>(aev_params_tuple[0].toDouble()); Rca = static_cast<DataT>(aev_params_tuple[1].toDouble()); radial_sublength = static_cast<IndexT>(aev_params_tuple[2].toInt()); radial_length = static_cast<IndexT>(aev_params_tuple[3].toInt()); angular_sublength = static_cast<IndexT>(aev_params_tuple[4].toInt()); angular_length = static_cast<IndexT>(aev_params_tuple[5].toInt()); num_species = static_cast<IndexT>(aev_params_tuple[6].toInt()); } operator torch::IValue() { return torch::IValue(std::make_tuple( (double)Rcr, (double)Rca, radial_sublength, radial_length, angular_sublength, angular_length, num_species)); } }; #define MAX_NSPECIES 10 __constant__ int csubaev_offsets[MAX_NSPECIES * MAX_NSPECIES]; template <typename DataT> struct PairDist { DataT Rij; int midx; short i; short j; }; // used to group Rijs by atom id template <typename DataT> __host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) { return lhs.midx == rhs.midx && lhs.i == rhs.i; } /// Alignment of memory. 
Must be a power of two /// \tparam boundary Boundary to align to (NOTE: must be power of 2) /// \param value Input value that is to be aligned /// \return Value aligned to boundary template <int32_t boundary> __host__ __device__ __forceinline__ int align(const int& value) { static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2"); return (value + boundary) & ~(boundary - 1); } template <typename SpeciesT, typename DataT, typename IndexT = int> __global__ void pairwiseDistance( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, PairDist<DataT>* d_Rij, IndexT max_natoms_per_mol) { extern __shared__ DataT spos[]; DataT* sx = &spos[0]; DataT* sy = &spos[max_natoms_per_mol]; DataT* sz = &spos[2 * max_natoms_per_mol]; int mol_idx = blockIdx.x; int tidx = threadIdx.y * blockDim.x + threadIdx.x; for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) { sx[i] = pos_t[mol_idx][i][0]; sy[i] = pos_t[mol_idx][i][1]; sz[i] = pos_t[mol_idx][i][2]; } __syncthreads(); int natom_pairs = max_natoms_per_mol * max_natoms_per_mol; for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) { SpeciesT type_i = species_t[mol_idx][i]; DataT xi = sx[i]; DataT yi = sy[i]; DataT zi = sz[i]; for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) { SpeciesT type_j = species_t[mol_idx][j]; const DataT xj = sx[j]; const DataT yj = sy[j]; const DataT zj = sz[j]; const DataT delx = xj - xi; const DataT dely = yj - yi; const DataT delz = zj - zi; const DataT Rsq = delx * delx + dely * dely + delz * delz; if (type_i != -1 && type_j != -1 && i != j) { DataT Rij = sqrt(Rsq); PairDist<DataT> d; d.Rij = Rij; d.midx = mol_idx; d.i = i; d.j = j; d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d; } } } } template <typename SpeciesT, typename DataT, typename IndexT = int> __global__ void pairwiseDistanceSingleMolecule( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, PairDist<DataT>* d_Rij, IndexT max_natoms_per_mol) { constexpr int mol_idx = 0; int natom_pairs = max_natoms_per_mol * max_natoms_per_mol; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= max_natoms_per_mol || j >= max_natoms_per_mol) return; SpeciesT type_i = species_t[mol_idx][i]; DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; SpeciesT type_j = species_t[mol_idx][j]; DataT xj = pos_t[mol_idx][j][0]; DataT yj = pos_t[mol_idx][j][1]; DataT zj = pos_t[mol_idx][j][2]; DataT delx = xj - xi; DataT dely = yj - yi; DataT delz = zj - zi; DataT Rsq = delx * delx + dely * dely + delz * delz; if (type_i != -1 && type_j != -1 && i != j) { DataT Rij = sqrt(Rsq); PairDist<DataT> d; d.Rij = Rij; d.midx = mol_idx; d.i = i; d.j = j; d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d; } } // every block compute blocksize RIJ's gradient by column major, to avoid atomicAdd waiting template <typename DataT, typename IndexT = int> __global__ void pairwiseDistance_backward( torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> grad_radial_dist, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_coord, const PairDist<DataT>* d_radialRij, IndexT nRadialRij) { int gidx = threadIdx.x * gridDim.x + 
blockIdx.x; if (gidx >= nRadialRij) return; PairDist<DataT> d = d_radialRij[gidx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0]; const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1]; const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2]; DataT grad_dist_coord_x = delx / Rij; DataT grad_dist_coord_y = dely / Rij; DataT grad_dist_coord_z = delz / Rij; DataT grad_radial_dist_item = grad_radial_dist[gidx]; atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x); atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y); atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z); atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x); atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y); atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z); } template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4> __global__ void cuAngularAEVs( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t, PairDist<DataT>* d_Rij, PairDist<DataT>* d_centralAtom, int* d_nPairsPerCenterAtom, int* d_centerAtomStartIdx, AEVScalarParams<DataT, IndexT> aev_params, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncentral_atoms) { extern __shared__ DataT smem[]; constexpr int threads_per_catom = TILEX * TILEY; static_assert(threads_per_catom == C10_WARP_SIZE); int gIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = gIdx / threads_per_catom; // central atom id if (cIdx >= ncentral_atoms) return; int groupIdx = threadIdx.x / threads_per_catom; int laneIdx = threadIdx.x % threads_per_catom; int ncatom_per_tpb = blockDim.x / threads_per_catom; DataT* saev = &smem[groupIdx * angular_length_aligned]; int offset = ncatom_per_tpb * angular_length_aligned; DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned]; DataT EtaA = EtaA_t[0]; DataT Zeta = Zeta_t[0]; IndexT nShfA = ShfA_t.size(0); IndexT nShfZ = ShfZ_t.size(0); DataT Rca = aev_params.Rca; IndexT num_species = aev_params.num_species; PairDist<DataT> d = d_centralAtom[cIdx]; int start_idx = d_centerAtomStartIdx[cIdx]; int jnum = d_nPairsPerCenterAtom[cIdx]; // center atom int i = d.i; int mol_idx = d.midx; for (int iaev = laneIdx; iaev < aev_params.angular_length; iaev += threads_per_catom) { saev[iaev] = 0; 
} DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { PairDist<DataT> dij = d_Rij[start_idx + jj]; int j = dij.j; DataT Rij = dij.Rij; SpeciesT type_j = species_t[mol_idx][j]; sdx[jj] = pos_t[mol_idx][j][0] - xi; sdy[jj] = pos_t[mol_idx][j][1] - yi; sdz[jj] = pos_t[mol_idx][j][2] - zi; stype[jj] = type_j; sdist[jj] = Rij; DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5; sfc[jj] = fc_ij; } short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX); // must sync if threads_per_catom != 32 (wrap size) to make sure shared data is ready // __syncthreads for (int jj = 0; jj < jnum; jj++) { const DataT Rij = sdist[jj]; SpeciesT type_j = stype[jj]; DataT fc_ij = sfc[jj]; for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) { int kk = kk_start + laneIdx; DataT theta = 0; if (kk < jnum) { const DataT Rik = sdist[kk]; theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik)); } for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) { int kk = kk_start + srcLane; DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane); const DataT Rik = sdist[kk]; SpeciesT type_k = stype[kk]; DataT fc_ik = sfc[kk]; DataT Rijk = (Rij + Rik) / 2; DataT fc_ijk = fc_ij * fc_ik; IndexT subaev_offset = csubaev_offsets[type_j * num_species + type_k]; for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) { DataT ShfZ = ShfZ_t[itheta]; DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta); for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) { DataT ShfA = ShfA_t[ishfr]; DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA)); DataT res = 2 * factor1 * factor2 * fc_ijk; saev[subaev_offset + ishfr * nShfZ + itheta] += res; } } } } } for (int iaev = laneIdx; iaev < aev_params.angular_length; iaev += threads_per_catom) { aev_t[mol_idx][i][aev_params.radial_length + iaev] = saev[iaev]; } } template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4> __global__ void // __launch_bounds__(32) cuAngularAEVs_backward( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_output, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_coord, const PairDist<DataT>* d_Rij, const PairDist<DataT>* d_centralAtom, int* d_nPairsPerCenterAtom, int* d_centerAtomStartIdx, AEVScalarParams<DataT, IndexT> aev_params, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncentral_atoms) { extern __shared__ DataT smem[]; constexpr int threads_per_catom = TILEX * TILEY; static_assert(threads_per_catom == C10_WARP_SIZE); int gIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = gIdx / threads_per_catom; // central atom id if (cIdx >= ncentral_atoms) return; int groupIdx = threadIdx.x / threads_per_catom; int laneIdx = threadIdx.x % threads_per_catom; int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 
2 catom per block DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned]; int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned]; DataT EtaA = EtaA_t[0]; DataT Zeta = Zeta_t[0]; IndexT nShfA = ShfA_t.size(0); IndexT nShfZ = ShfZ_t.size(0); DataT Rca = aev_params.Rca; IndexT num_species = aev_params.num_species; PairDist<DataT> d = d_centralAtom[cIdx]; int start_idx = d_centerAtomStartIdx[cIdx]; int jnum = d_nPairsPerCenterAtom[cIdx]; // center atom int i = d.i; int mol_idx = d.midx; DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { PairDist<DataT> dij = d_Rij[start_idx + jj]; int j = dij.j; DataT Rij = dij.Rij; SpeciesT type_j = species_t[mol_idx][j]; sdx[jj] = pos_t[mol_idx][j][0] - xi; sdy[jj] = pos_t[mol_idx][j][1] - yi; sdz[jj] = pos_t[mol_idx][j][2] - zi; stype[jj] = type_j; sdist[jj] = Rij; // cutoff DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5; DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca); sfc[jj] = fc_ij; sfc_grad[jj] = fc_ij_grad; } // grad init DataT sdix_grad = 0; DataT sdiy_grad = 0; DataT sdiz_grad = 0; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { sdjx_grad[jj] = 0; sdjy_grad[jj] = 0; sdjz_grad[jj] = 0; } short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX); const DataT tc = 0.95; // theta constant factor // must sync if threads_per_catom != 32 (wrap size) to make sure shared data is ready // __syncthreads for (int jj = 0; jj < jnum; jj++) { const DataT Rij = sdist[jj]; SpeciesT type_j = stype[jj]; DataT fc_ij = sfc[jj]; DataT grad_fc_ij = sfc_grad[jj]; for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) { int kk = kk_start + laneIdx; DataT theta = 0; DataT grad_theta_vij_x = 0; DataT grad_theta_vij_y = 0; DataT grad_theta_vij_z = 0; DataT grad_theta_vik_x = 0; DataT grad_theta_vik_y = 0; DataT grad_theta_vik_z = 0; if (kk < jnum) { const DataT Rik = sdist[kk]; DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]; theta = acos(tc * vij_vik_dot / (Rij * Rik)); // grad DataT vij_factor = tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik)); DataT vik_factor = tc / (Rik * Rik * Rik * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky 80ms improved grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij); grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - 
sdy[kk] * Rij * Rij); grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij); grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik); grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik); grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik); } for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) { int kk = kk_start + srcLane; DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane); // TODO necessary? DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane); DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane); DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane); DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane); DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane); DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane); const DataT Rik = sdist[kk]; SpeciesT type_k = stype[kk]; DataT fc_ik = sfc[kk]; DataT grad_fc_ik = sfc_grad[kk]; DataT Rijk = (Rij + Rik) / 2; DataT fc_ijk = fc_ij * fc_ik; IndexT subaev_offset = csubaev_offsets[type_j * num_species + type_k]; for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) { DataT ShfZ = ShfZ_t[itheta]; DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta); DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) * sin(ShfZ - theta_ijk); // tricky 100ms improved for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) { DataT ShfA = ShfA_t[ishfr]; DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA)); DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2; DataT grad_output_item = grad_output[mol_idx][i][aev_params.radial_length + subaev_offset + ishfr * nShfZ + itheta]; DataT grad_vij_x = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij); DataT grad_vij_y = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij); DataT grad_vij_z = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij); DataT grad_vik_x = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik); DataT grad_vik_y = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik); DataT grad_vik_z = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik); sdix_grad += (-grad_vij_x - grad_vik_x); sdiy_grad += (-grad_vij_y - grad_vik_y); sdiz_grad += (-grad_vij_z - grad_vik_z); for (int offset = 16; offset > 0; offset /= 2) { grad_vij_x += __shfl_down_sync(0xFFFFFFFF, grad_vij_x, offset); grad_vij_y += __shfl_down_sync(0xFFFFFFFF, grad_vij_y, offset); grad_vij_z += __shfl_down_sync(0xFFFFFFFF, grad_vij_z, offset); grad_vik_x += 
__shfl_down_sync(0xFFFFFFFF, grad_vik_x, offset); grad_vik_y += __shfl_down_sync(0xFFFFFFFF, grad_vik_y, offset); grad_vik_z += __shfl_down_sync(0xFFFFFFFF, grad_vik_z, offset); } if (laneIdx == 0) { sdjx_grad[jj] += grad_vij_x; sdjy_grad[jj] += grad_vij_y; sdjz_grad[jj] += grad_vij_z; sdjx_grad[kk] += grad_vik_x; sdjy_grad[kk] += grad_vik_y; sdjz_grad[kk] += grad_vik_z; } } } } } } int atomi_idx = i; atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad); atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad); atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad); for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { int atomj_idx = d_Rij[start_idx + jj].j; atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]); atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]); atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]); } } template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ> __global__ void cuRadialAEVs( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t, PairDist<DataT>* d_Rij, AEVScalarParams<DataT, int> aev_params, int nRadialRij) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int idx = gidx / THREADS_PER_RIJ; int nShfR = ShfR_t.size(0); DataT EtaR = EtaR_t[0]; if (idx >= nRadialRij) return; int laneIdx = threadIdx.x % THREADS_PER_RIJ; PairDist<DataT> d = d_Rij[idx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; SpeciesT type_j = species_t[mol_idx][j]; DataT fc = 0.5 * cos(PI * Rij / aev_params.Rcr) + 0.5; for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) { DataT ShfR = ShfR_t[ishfr]; DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc; atomicAdd(&aev_t[mol_idx][i][type_j * aev_params.radial_sublength + ishfr], GmR); } } // every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ> __global__ void cuRadialAEVs_backward( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_output, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> grad_radial_dist, const PairDist<DataT>* d_Rij, AEVScalarParams<DataT, int> aev_params, int nRadialRij) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int idx = gidx / THREADS_PER_RIJ; int nShfR = ShfR_t.size(0); DataT EtaR = EtaR_t[0]; if (idx >= nRadialRij) return; int laneIdx = threadIdx.x % THREADS_PER_RIJ; PairDist<DataT> d = d_Rij[idx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; SpeciesT type_j = species_t[mol_idx][j]; DataT fc = 0.5 * cos(PI * Rij / aev_params.Rcr) + 0.5; DataT fc_grad = -0.5 * (PI / aev_params.Rcr) * sin(PI * Rij / aev_params.Rcr); for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) { DataT ShfR = ShfR_t[ishfr]; DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)); DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR; DataT grad_output_item = grad_output[mol_idx][i][type_j * aev_params.radial_sublength + ishfr]; DataT grad_radial_dist_item = grad_output_item * (GmR_grad * fc + GmR * 
fc_grad); atomicAdd(&grad_radial_dist[idx], grad_radial_dist_item); } } template <typename DataT> void cubScan(const DataT* d_in, DataT* d_out, int num_items, hipStream_t stream) { auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Determine temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream); // Allocate temporary storage auto buffer_tmp = allocator.allocate(temp_storage_bytes); d_temp_storage = buffer_tmp.get(); // Run exclusive prefix sum hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream); } template <typename DataT, typename IndexT> int cubEncode( const DataT* d_in, DataT* d_unique_out, IndexT* d_counts_out, int num_items, int* d_num_runs_out, hipStream_t stream) { auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Determine temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceRunLengthEncode::Encode( d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream); // Allocate temporary storage auto buffer_tmp = allocator.allocate(temp_storage_bytes); d_temp_storage = buffer_tmp.get(); // Run encoding hipcub::DeviceRunLengthEncode::Encode( d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream); int num_selected = 0; hipMemcpyAsync(&num_selected, d_num_runs_out, sizeof(int), hipMemcpyDefault, stream); hipStreamSynchronize(stream); return num_selected; } template <typename DataT, typename LambdaOpT> int cubDeviceSelect( const DataT* d_in, DataT* d_out, int num_items, int* d_num_selected_out, LambdaOpT select_op, hipStream_t stream) { auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Determine temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); // Allocate temporary storage auto buffer_tmp = allocator.allocate(temp_storage_bytes); d_temp_storage = buffer_tmp.get(); // Run selection hipcub::DeviceSelect::If( d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream); int num_selected = 0; hipMemcpyAsync(&num_selected, d_num_selected_out, sizeof(int), hipMemcpyDefault, stream); hipStreamSynchronize(stream); return num_selected; } template <typename DataT> DataT cubMax(const DataT* d_in, int num_items, DataT* d_out, hipStream_t stream) { auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Determine temporary device storage requirements void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream); // Allocate temporary storage auto buffer_tmp = allocator.allocate(temp_storage_bytes); d_temp_storage = buffer_tmp.get(); // Run min-reduction hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream); int maxVal = 0; hipMemcpyAsync(&maxVal, d_out, sizeof(DataT), hipMemcpyDefault, stream); hipStreamSynchronize(stream); return maxVal; } void initConsts(AEVScalarParams<float>& aev_params, hipStream_t stream) { int num_species = aev_params.num_species; assert(num_species <= MAX_NSPECIES); // precompute the aev offsets and load to 
constand memory int* subaev_offsets = new int[num_species * num_species]; for (int t = 0; t < num_species; ++t) { int offset = 0; for (int s = 0; s < num_species; s++) { if (t < num_species - s) { subaev_offsets[s * num_species + s + t] = aev_params.angular_sublength * (offset + t); subaev_offsets[(s + t) * num_species + s] = aev_params.angular_sublength * (offset + t); } offset += num_species - s; } } hipMemcpyToSymbolAsync( csubaev_offsets, subaev_offsets, sizeof(int) * num_species * num_species, 0, hipMemcpyDefault, stream); delete[] subaev_offsets; } struct Result { Tensor aev_t; AEVScalarParams<float> aev_params; Tensor tensor_Rij; Tensor tensor_radialRij; Tensor tensor_angularRij; int total_natom_pairs; int nRadialRij; int nAngularRij; Tensor tensor_centralAtom; Tensor tensor_numPairsPerCenterAtom; Tensor tensor_centerAtomStartIdx; int maxnbrs_per_atom_aligned; int angular_length_aligned; int ncenter_atoms; }; // NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1 template <typename ScalarRealT = float> Result cuaev_forward( const Tensor& coordinates_t, const Tensor& species_t, double Rcr_, double Rca_, const Tensor& EtaR_t, const Tensor& ShfR_t, const Tensor& EtaA_t, const Tensor& Zeta_t, const Tensor& ShfA_t, const Tensor& ShfZ_t, int64_t num_species_) { TORCH_CHECK( (species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type"); TORCH_CHECK( EtaR_t.size(0) == 1 || EtaA_t.size(0) == 1 || Zeta_t.size(0) == 1, "cuda extension is currently not supported for the specified " "configuration"); ScalarRealT Rcr = Rcr_; ScalarRealT Rca = Rca_; int num_species = num_species_; const int n_molecules = species_t.size(0); const int max_natoms_per_mol = species_t.size(1); AEVScalarParams<float> aev_params; aev_params.Rca = Rca; aev_params.Rcr = Rcr; aev_params.num_species = num_species; aev_params.radial_sublength = EtaR_t.size(0) * ShfR_t.size(0); aev_params.radial_length = aev_params.radial_sublength * num_species; aev_params.angular_sublength = EtaA_t.size(0) * Zeta_t.size(0) * ShfA_t.size(0) * ShfZ_t.size(0); aev_params.angular_length = aev_params.angular_sublength * (num_species * (num_species + 1) / 2); int aev_length = aev_params.radial_length + aev_params.angular_length; auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options()); if (species_t.numel() == 0) { return {aev_t, aev_params, Tensor(), Tensor(), Tensor(), 0, 0, 0}; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto thrust_allocator = THCThrustAllocator(at::globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(thrust_allocator).on(stream); auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // precompute the aev offsets and load to constand memory initConsts(aev_params, stream); // buffer to store all the pairwise distance (Rij) auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol; auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device()); Tensor tensor_Rij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options); PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr(); // init all Rij to inf PairDist<float> init; init.Rij = std::numeric_limits<float>::infinity(); thrust::fill(policy, d_Rij, d_Rij + total_natom_pairs, init); // buffer to store all the pairwise distance that is needed for Radial AEV // computation Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options); 
PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr();

  auto buffer_count = allocator.allocate(sizeof(int));
  int* d_count_out = (int*)buffer_count.get();

  const int block_size = 64;
  dim3 block(8, 8, 1);

  if (n_molecules == 1) {
    int tileWidth = 32;
    int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
    dim3 block(tileWidth, tileWidth, 1);
    dim3 grid(tilesPerRow, tilesPerRow, 1);
    hipLaunchKernelGGL(( pairwiseDistanceSingleMolecule), dim3(grid), dim3(block), 0, stream,
        species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
        coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
        d_Rij,
        max_natoms_per_mol);
  } else {
    dim3 block(8, 8, 1);
    // Compute pairwise distance (Rij) for all atom pairs in a molecule
    // maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
    // TODO: the kernel is not optimized for batches of huge molecules (max_natoms_per_mol > 1000)
    hipLaunchKernelGGL(( pairwiseDistance), dim3(n_molecules), dim3(block), sizeof(float) * max_natoms_per_mol * 3, stream,
        species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
        coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
        d_Rij,
        max_natoms_per_mol);
  }

  // Extract Rijs that are needed for RadialAEV computation, i.e. all the Rij <= Rcr
  int nRadialRij = cubDeviceSelect(
      d_Rij,
      d_radialRij,
      total_natom_pairs,
      d_count_out,
      [=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
      stream);

  int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
  hipLaunchKernelGGL(( cuRadialAEVs<int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
      species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
      ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
      EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
      aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
      d_radialRij,
      aev_params,
      nRadialRij);

  // reuse buffer allocated for all Rij
  // d_angularRij will store all the Rij required in Angular AEV computation
  Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
  PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();

  // Extract Rijs that are needed for AngularAEV computation, i.e.
all the Rij // <= Rca int nAngularRij = cubDeviceSelect( d_radialRij, d_angularRij, nRadialRij, d_count_out, [=] __device__(const PairDist<float> d) { return d.Rij <= Rca; }, stream); Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options); PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr(); Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options); int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr(); // group by center atom int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream); Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options); int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr(); cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream); { const int nthreads_per_catom = 32; const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size; auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) { int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4 int sxyz = sizeof(float) * max_nbrs * 3; int sRij = sizeof(float) * max_nbrs; int sfc = sizeof(float) * max_nbrs; int sj = sizeof(int) * max_nbrs; return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb; }; int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream); int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom); int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom); int angular_length_aligned = align<4>(aev_params.angular_length); hipLaunchKernelGGL(( cuAngularAEVs), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream, species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, d_centerAtomStartIdx, aev_params, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return {aev_t, aev_params, tensor_Rij, tensor_radialRij, tensor_angularRij, total_natom_pairs, nRadialRij, nAngularRij, tensor_centralAtom, tensor_numPairsPerCenterAtom, tensor_centerAtomStartIdx, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms}; } } Tensor cuaev_backward( const Tensor& grad_output, const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams<float>& aev_params, const Tensor& EtaR_t, const Tensor& ShfR_t, const Tensor& EtaA_t, const Tensor& Zeta_t, const Tensor& ShfA_t, const Tensor& ShfZ_t, const Tensor& tensor_Rij, int total_natom_pairs, const Tensor& tensor_radialRij, int nRadialRij, const Tensor& tensor_angularRij, int nAngularRij, const Tensor& tensor_centralAtom, const Tensor& tensor_numPairsPerCenterAtom, const Tensor& tensor_centerAtomStartIdx, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncenter_atoms) { using namespace torch::indexing; const int n_molecules = coordinates_t.size(0); const int max_natoms_per_mol = coordinates_t.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto 
grad_coord = torch::zeros(coordinates_t.sizes(), coordinates_t.options().requires_grad(false)); // [2, 5, 3] auto grad_output_radial = grad_output.index({Ellipsis, Slice(None, aev_params.radial_length)}); // [2, 5, 64] auto grad_output_angular = grad_output.index({Ellipsis, Slice(aev_params.radial_length, None)}); // [2, 5, 320] PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr(); PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr(); PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr(); PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr(); int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr(); int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr(); Tensor grad_radial_dist = torch::zeros(nRadialRij, coordinates_t.options().requires_grad(false)); int block_size = 64; int nblocks = (nRadialRij * 8 + block_size - 1) / block_size; hipLaunchKernelGGL(( cuRadialAEVs_backward<int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream, species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), d_radialRij, aev_params, nRadialRij); // For best result, block_size should match average molecule size (no padding) to avoid atomicAdd nblocks = (nRadialRij + block_size - 1) / block_size; hipLaunchKernelGGL(( pairwiseDistance_backward), dim3(nblocks), dim3(block_size), 0, stream, coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_radialRij, nRadialRij); auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) { int sxyz = sizeof(float) * max_nbrs * 3; int sj_xyz_grad = sizeof(float) * max_nbrs * 3; int sRij = sizeof(float) * max_nbrs; int sfc = sizeof(float) * max_nbrs; int sfc_grad = sizeof(float) * max_nbrs; int sj = sizeof(int) * max_nbrs; return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb; }; block_size = 32; const int nthreads_per_catom = 32; const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size; int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom); Tensor grad_angular_coord = torch::zeros({nAngularRij, 3}, coordinates_t.options().requires_grad(false)); hipLaunchKernelGGL(( cuAngularAEVs_backward), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream, species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, d_centerAtomStartIdx, aev_params, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return grad_coord; } #define AEV_INPUT \ const Tensor &coordinates_t, const Tensor &species_t, 
double Rcr_, double Rca_, const Tensor &EtaR_t, \ const Tensor &ShfR_t, const Tensor &EtaA_t, const Tensor &Zeta_t, const Tensor &ShfA_t, const Tensor &ShfZ_t, \ int64_t num_species_ Tensor cuaev_cuda(AEV_INPUT) { Result res = cuaev_forward<float>( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); return res.aev_t; } class CuaevAutograd : public torch::autograd::Function<CuaevAutograd> { public: static Tensor forward(torch::autograd::AutogradContext* ctx, AEV_INPUT) { at::AutoNonVariableTypeMode g; Result res = cuaev_forward<float>( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); if (coordinates_t.requires_grad()) { ctx->save_for_backward({coordinates_t, species_t, res.tensor_Rij, res.tensor_radialRij, res.tensor_angularRij, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, res.tensor_centralAtom, res.tensor_numPairsPerCenterAtom, res.tensor_centerAtomStartIdx}); ctx->saved_data["aev_params"] = res.aev_params; ctx->saved_data["int_list"] = c10::List<int64_t>{res.total_natom_pairs, res.nRadialRij, res.nAngularRij, res.maxnbrs_per_atom_aligned, res.angular_length_aligned, res.ncenter_atoms}; } return res.aev_t; } static tensor_list backward(torch::autograd::AutogradContext* ctx, tensor_list grad_outputs) { auto saved = ctx->get_saved_variables(); auto coordinates_t = saved[0], species_t = saved[1]; auto tensor_Rij = saved[2], tensor_radialRij = saved[3], tensor_angularRij = saved[4]; auto EtaR_t = saved[5], ShfR_t = saved[6], EtaA_t = saved[7], Zeta_t = saved[8], ShfA_t = saved[9], ShfZ_t = saved[10]; auto tensor_centralAtom = saved[11], tensor_numPairsPerCenterAtom = saved[12], tensor_centerAtomStartIdx = saved[13]; AEVScalarParams<float> aev_params(ctx->saved_data["aev_params"]); c10::List<int64_t> int_list = ctx->saved_data["int_list"].toIntList(); int total_natom_pairs = int_list[0], nRadialRij = int_list[1], nAngularRij = int_list[2]; int maxnbrs_per_atom_aligned = int_list[3], angular_length_aligned = int_list[4]; int ncenter_atoms = int_list[5]; Tensor grad_coord = cuaev_backward( grad_outputs[0], coordinates_t, species_t, aev_params, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, tensor_Rij, total_natom_pairs, tensor_radialRij, nRadialRij, tensor_angularRij, nAngularRij, tensor_centralAtom, tensor_numPairsPerCenterAtom, tensor_centerAtomStartIdx, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return { grad_coord, Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor()}; } }; Tensor cuaev_autograd(AEV_INPUT) { return CuaevAutograd::apply( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); } TORCH_LIBRARY(cuaev, m) { m.def("cuComputeAEV", cuaev_cuda); } TORCH_LIBRARY_IMPL(cuaev, CUDA, m) { m.impl("cuComputeAEV", cuaev_cuda); } TORCH_LIBRARY_IMPL(cuaev, Autograd, m) { m.impl("cuComputeAEV", cuaev_autograd); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {}
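Once this extension is loaded, the cuComputeAEV op registered above can also be exercised directly from C++ through the PyTorch dispatcher. The sketch below is illustrative only: the tensor shapes, Eta/Shf/Zeta values, Rcr/Rca cutoffs and the helper name compute_aev_example are assumptions rather than part of the extension; real hyperparameters come from the ANI model that drives it.

#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/torch.h>

// Illustrative caller: looks up the registered schema "cuaev::cuComputeAEV"
// and invokes it with small random inputs on the current GPU device.
torch::Tensor compute_aev_example() {
  auto opts = torch::dtype(torch::kFloat32).device(torch::kCUDA);
  auto coordinates = torch::rand({1, 8, 3}, opts);                  // one molecule, 8 atoms
  auto species = torch::zeros({1, 8}, opts.dtype(torch::kInt32));   // all atoms of species 0
  auto EtaR = torch::full({1}, 16.0f, opts);
  auto ShfR = torch::linspace(0.9, 5.2, 16, opts);
  auto EtaA = torch::full({1}, 8.0f, opts);
  auto Zeta = torch::full({1}, 32.0f, opts);
  auto ShfA = torch::linspace(0.9, 3.5, 4, opts);
  auto ShfZ = torch::linspace(0.19, 3.0, 8, opts);
  static auto op = c10::Dispatcher::singleton()
                       .findSchemaOrThrow("cuaev::cuComputeAEV", "")
                       .typed<torch::Tensor(
                           const torch::Tensor&, const torch::Tensor&, double, double,
                           const torch::Tensor&, const torch::Tensor&, const torch::Tensor&,
                           const torch::Tensor&, const torch::Tensor&, const torch::Tensor&,
                           int64_t)>();
  // Rcr = 5.2, Rca = 3.5, num_species = 4 are example values only.
  return op.call(coordinates, species, 5.2, 3.5, EtaR, ShfR, EtaA, Zeta, ShfA, ShfZ, 4);
}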
1280d3f56d467c43f9f9d2be2d33ccffc7d7d356.cu
#include <thrust/equal.h> #include <torch/extension.h> #include <cub/cub.cuh> #include <vector> #include <ATen/Context.h> #include <THC/THC.h> #include <c10/cuda/CUDACachingAllocator.h> #include <THC/THCThrustAllocator.cuh> #define PI 3.141592653589793 using torch::Tensor; using torch::autograd::tensor_list; template <typename DataT, typename IndexT = int> struct AEVScalarParams { DataT Rcr; DataT Rca; IndexT radial_sublength; IndexT radial_length; IndexT angular_sublength; IndexT angular_length; IndexT num_species; AEVScalarParams() = default; AEVScalarParams(const torch::IValue& aev_params_ivalue) { c10::intrusive_ptr<c10::ivalue::Tuple> aev_params_tuple_ptr = aev_params_ivalue.toTuple(); auto aev_params_tuple = aev_params_tuple_ptr->elements(); Rcr = static_cast<DataT>(aev_params_tuple[0].toDouble()); Rca = static_cast<DataT>(aev_params_tuple[1].toDouble()); radial_sublength = static_cast<IndexT>(aev_params_tuple[2].toInt()); radial_length = static_cast<IndexT>(aev_params_tuple[3].toInt()); angular_sublength = static_cast<IndexT>(aev_params_tuple[4].toInt()); angular_length = static_cast<IndexT>(aev_params_tuple[5].toInt()); num_species = static_cast<IndexT>(aev_params_tuple[6].toInt()); } operator torch::IValue() { return torch::IValue(std::make_tuple( (double)Rcr, (double)Rca, radial_sublength, radial_length, angular_sublength, angular_length, num_species)); } }; #define MAX_NSPECIES 10 __constant__ int csubaev_offsets[MAX_NSPECIES * MAX_NSPECIES]; template <typename DataT> struct PairDist { DataT Rij; int midx; short i; short j; }; // used to group Rijs by atom id template <typename DataT> __host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) { return lhs.midx == rhs.midx && lhs.i == rhs.i; } /// Alignment of memory. 
Must be a power of two /// \tparam boundary Boundary to align to (NOTE: must be power of 2) /// \param value Input value that is to be aligned /// \return Value aligned to boundary template <int32_t boundary> __host__ __device__ __forceinline__ int align(const int& value) { static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2"); return (value + boundary) & ~(boundary - 1); } template <typename SpeciesT, typename DataT, typename IndexT = int> __global__ void pairwiseDistance( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, PairDist<DataT>* d_Rij, IndexT max_natoms_per_mol) { extern __shared__ DataT spos[]; DataT* sx = &spos[0]; DataT* sy = &spos[max_natoms_per_mol]; DataT* sz = &spos[2 * max_natoms_per_mol]; int mol_idx = blockIdx.x; int tidx = threadIdx.y * blockDim.x + threadIdx.x; for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) { sx[i] = pos_t[mol_idx][i][0]; sy[i] = pos_t[mol_idx][i][1]; sz[i] = pos_t[mol_idx][i][2]; } __syncthreads(); int natom_pairs = max_natoms_per_mol * max_natoms_per_mol; for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) { SpeciesT type_i = species_t[mol_idx][i]; DataT xi = sx[i]; DataT yi = sy[i]; DataT zi = sz[i]; for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) { SpeciesT type_j = species_t[mol_idx][j]; const DataT xj = sx[j]; const DataT yj = sy[j]; const DataT zj = sz[j]; const DataT delx = xj - xi; const DataT dely = yj - yi; const DataT delz = zj - zi; const DataT Rsq = delx * delx + dely * dely + delz * delz; if (type_i != -1 && type_j != -1 && i != j) { DataT Rij = sqrt(Rsq); PairDist<DataT> d; d.Rij = Rij; d.midx = mol_idx; d.i = i; d.j = j; d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d; } } } } template <typename SpeciesT, typename DataT, typename IndexT = int> __global__ void pairwiseDistanceSingleMolecule( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, PairDist<DataT>* d_Rij, IndexT max_natoms_per_mol) { constexpr int mol_idx = 0; int natom_pairs = max_natoms_per_mol * max_natoms_per_mol; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= max_natoms_per_mol || j >= max_natoms_per_mol) return; SpeciesT type_i = species_t[mol_idx][i]; DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; SpeciesT type_j = species_t[mol_idx][j]; DataT xj = pos_t[mol_idx][j][0]; DataT yj = pos_t[mol_idx][j][1]; DataT zj = pos_t[mol_idx][j][2]; DataT delx = xj - xi; DataT dely = yj - yi; DataT delz = zj - zi; DataT Rsq = delx * delx + dely * dely + delz * delz; if (type_i != -1 && type_j != -1 && i != j) { DataT Rij = sqrt(Rsq); PairDist<DataT> d; d.Rij = Rij; d.midx = mol_idx; d.i = i; d.j = j; d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d; } } // every block compute blocksize RIJ's gradient by column major, to avoid atomicAdd waiting template <typename DataT, typename IndexT = int> __global__ void pairwiseDistance_backward( torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> grad_radial_dist, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_coord, const PairDist<DataT>* d_radialRij, IndexT nRadialRij) { int gidx = threadIdx.x * gridDim.x + 
blockIdx.x; if (gidx >= nRadialRij) return; PairDist<DataT> d = d_radialRij[gidx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0]; const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1]; const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2]; DataT grad_dist_coord_x = delx / Rij; DataT grad_dist_coord_y = dely / Rij; DataT grad_dist_coord_z = delz / Rij; DataT grad_radial_dist_item = grad_radial_dist[gidx]; atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x); atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y); atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z); atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x); atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y); atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z); } template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4> __global__ void cuAngularAEVs( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t, PairDist<DataT>* d_Rij, PairDist<DataT>* d_centralAtom, int* d_nPairsPerCenterAtom, int* d_centerAtomStartIdx, AEVScalarParams<DataT, IndexT> aev_params, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncentral_atoms) { extern __shared__ DataT smem[]; constexpr int threads_per_catom = TILEX * TILEY; static_assert(threads_per_catom == C10_WARP_SIZE); int gIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = gIdx / threads_per_catom; // central atom id if (cIdx >= ncentral_atoms) return; int groupIdx = threadIdx.x / threads_per_catom; int laneIdx = threadIdx.x % threads_per_catom; int ncatom_per_tpb = blockDim.x / threads_per_catom; DataT* saev = &smem[groupIdx * angular_length_aligned]; int offset = ncatom_per_tpb * angular_length_aligned; DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned]; DataT EtaA = EtaA_t[0]; DataT Zeta = Zeta_t[0]; IndexT nShfA = ShfA_t.size(0); IndexT nShfZ = ShfZ_t.size(0); DataT Rca = aev_params.Rca; IndexT num_species = aev_params.num_species; PairDist<DataT> d = d_centralAtom[cIdx]; int start_idx = d_centerAtomStartIdx[cIdx]; int jnum = d_nPairsPerCenterAtom[cIdx]; // center atom int i = d.i; int mol_idx = d.midx; for (int iaev = laneIdx; iaev < aev_params.angular_length; iaev += threads_per_catom) { saev[iaev] = 0; 
} DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { PairDist<DataT> dij = d_Rij[start_idx + jj]; int j = dij.j; DataT Rij = dij.Rij; SpeciesT type_j = species_t[mol_idx][j]; sdx[jj] = pos_t[mol_idx][j][0] - xi; sdy[jj] = pos_t[mol_idx][j][1] - yi; sdz[jj] = pos_t[mol_idx][j][2] - zi; stype[jj] = type_j; sdist[jj] = Rij; DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5; sfc[jj] = fc_ij; } short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX); // must sync if threads_per_catom != 32 (wrap size) to make sure shared data is ready // __syncthreads for (int jj = 0; jj < jnum; jj++) { const DataT Rij = sdist[jj]; SpeciesT type_j = stype[jj]; DataT fc_ij = sfc[jj]; for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) { int kk = kk_start + laneIdx; DataT theta = 0; if (kk < jnum) { const DataT Rik = sdist[kk]; theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik)); } for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) { int kk = kk_start + srcLane; DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane); const DataT Rik = sdist[kk]; SpeciesT type_k = stype[kk]; DataT fc_ik = sfc[kk]; DataT Rijk = (Rij + Rik) / 2; DataT fc_ijk = fc_ij * fc_ik; IndexT subaev_offset = csubaev_offsets[type_j * num_species + type_k]; for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) { DataT ShfZ = ShfZ_t[itheta]; DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta); for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) { DataT ShfA = ShfA_t[ishfr]; DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA)); DataT res = 2 * factor1 * factor2 * fc_ijk; saev[subaev_offset + ishfr * nShfZ + itheta] += res; } } } } } for (int iaev = laneIdx; iaev < aev_params.angular_length; iaev += threads_per_catom) { aev_t[mol_idx][i][aev_params.radial_length + iaev] = saev[iaev]; } } template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4> __global__ void // __launch_bounds__(32) cuAngularAEVs_backward( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_output, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_coord, const PairDist<DataT>* d_Rij, const PairDist<DataT>* d_centralAtom, int* d_nPairsPerCenterAtom, int* d_centerAtomStartIdx, AEVScalarParams<DataT, IndexT> aev_params, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncentral_atoms) { extern __shared__ DataT smem[]; constexpr int threads_per_catom = TILEX * TILEY; static_assert(threads_per_catom == C10_WARP_SIZE); int gIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = gIdx / threads_per_catom; // central atom id if (cIdx >= ncentral_atoms) return; int groupIdx = threadIdx.x / threads_per_catom; int laneIdx = threadIdx.x % threads_per_catom; int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 
2 catom per block DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned]; int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned]; offset += ncatom_per_tpb * maxnbrs_per_atom_aligned; int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned]; DataT EtaA = EtaA_t[0]; DataT Zeta = Zeta_t[0]; IndexT nShfA = ShfA_t.size(0); IndexT nShfZ = ShfZ_t.size(0); DataT Rca = aev_params.Rca; IndexT num_species = aev_params.num_species; PairDist<DataT> d = d_centralAtom[cIdx]; int start_idx = d_centerAtomStartIdx[cIdx]; int jnum = d_nPairsPerCenterAtom[cIdx]; // center atom int i = d.i; int mol_idx = d.midx; DataT xi = pos_t[mol_idx][i][0]; DataT yi = pos_t[mol_idx][i][1]; DataT zi = pos_t[mol_idx][i][2]; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { PairDist<DataT> dij = d_Rij[start_idx + jj]; int j = dij.j; DataT Rij = dij.Rij; SpeciesT type_j = species_t[mol_idx][j]; sdx[jj] = pos_t[mol_idx][j][0] - xi; sdy[jj] = pos_t[mol_idx][j][1] - yi; sdz[jj] = pos_t[mol_idx][j][2] - zi; stype[jj] = type_j; sdist[jj] = Rij; // cutoff DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5; DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca); sfc[jj] = fc_ij; sfc_grad[jj] = fc_ij_grad; } // grad init DataT sdix_grad = 0; DataT sdiy_grad = 0; DataT sdiz_grad = 0; for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { sdjx_grad[jj] = 0; sdjy_grad[jj] = 0; sdjz_grad[jj] = 0; } short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX); const DataT tc = 0.95; // theta constant factor // must sync if threads_per_catom != 32 (wrap size) to make sure shared data is ready // __syncthreads for (int jj = 0; jj < jnum; jj++) { const DataT Rij = sdist[jj]; SpeciesT type_j = stype[jj]; DataT fc_ij = sfc[jj]; DataT grad_fc_ij = sfc_grad[jj]; for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) { int kk = kk_start + laneIdx; DataT theta = 0; DataT grad_theta_vij_x = 0; DataT grad_theta_vij_y = 0; DataT grad_theta_vij_z = 0; DataT grad_theta_vik_x = 0; DataT grad_theta_vik_y = 0; DataT grad_theta_vik_z = 0; if (kk < jnum) { const DataT Rik = sdist[kk]; DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]; theta = acos(tc * vij_vik_dot / (Rij * Rik)); // grad DataT vij_factor = tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik)); DataT vik_factor = tc / (Rik * Rik * Rik * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky 80ms improved grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij); grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - 
sdy[kk] * Rij * Rij); grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij); grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik); grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik); grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik); } for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) { int kk = kk_start + srcLane; DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane); // TODO necessary? DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane); DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane); DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane); DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane); DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane); DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane); const DataT Rik = sdist[kk]; SpeciesT type_k = stype[kk]; DataT fc_ik = sfc[kk]; DataT grad_fc_ik = sfc_grad[kk]; DataT Rijk = (Rij + Rik) / 2; DataT fc_ijk = fc_ij * fc_ik; IndexT subaev_offset = csubaev_offsets[type_j * num_species + type_k]; for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) { DataT ShfZ = ShfZ_t[itheta]; DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta); DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) * sin(ShfZ - theta_ijk); // tricky 100ms improved for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) { DataT ShfA = ShfA_t[ishfr]; DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA)); DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2; DataT grad_output_item = grad_output[mol_idx][i][aev_params.radial_length + subaev_offset + ishfr * nShfZ + itheta]; DataT grad_vij_x = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij); DataT grad_vij_y = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij); DataT grad_vij_z = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk + factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij); DataT grad_vik_x = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik); DataT grad_vik_y = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik); DataT grad_vik_z = 2 * grad_output_item * (grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk + factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk + factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik); sdix_grad += (-grad_vij_x - grad_vik_x); sdiy_grad += (-grad_vij_y - grad_vik_y); sdiz_grad += (-grad_vij_z - grad_vik_z); for (int offset = 16; offset > 0; offset /= 2) { grad_vij_x += __shfl_down_sync(0xFFFFFFFF, grad_vij_x, offset); grad_vij_y += __shfl_down_sync(0xFFFFFFFF, grad_vij_y, offset); grad_vij_z += __shfl_down_sync(0xFFFFFFFF, grad_vij_z, offset); grad_vik_x += 
__shfl_down_sync(0xFFFFFFFF, grad_vik_x, offset); grad_vik_y += __shfl_down_sync(0xFFFFFFFF, grad_vik_y, offset); grad_vik_z += __shfl_down_sync(0xFFFFFFFF, grad_vik_z, offset); } if (laneIdx == 0) { sdjx_grad[jj] += grad_vij_x; sdjy_grad[jj] += grad_vij_y; sdjz_grad[jj] += grad_vij_z; sdjx_grad[kk] += grad_vik_x; sdjy_grad[kk] += grad_vik_y; sdjz_grad[kk] += grad_vik_z; } } } } } } int atomi_idx = i; atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad); atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad); atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad); for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) { int atomj_idx = d_Rij[start_idx + jj].j; atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]); atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]); atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]); } } template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ> __global__ void cuRadialAEVs( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t, PairDist<DataT>* d_Rij, AEVScalarParams<DataT, int> aev_params, int nRadialRij) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int idx = gidx / THREADS_PER_RIJ; int nShfR = ShfR_t.size(0); DataT EtaR = EtaR_t[0]; if (idx >= nRadialRij) return; int laneIdx = threadIdx.x % THREADS_PER_RIJ; PairDist<DataT> d = d_Rij[idx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; SpeciesT type_j = species_t[mol_idx][j]; DataT fc = 0.5 * cos(PI * Rij / aev_params.Rcr) + 0.5; for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) { DataT ShfR = ShfR_t[ishfr]; DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc; atomicAdd(&aev_t[mol_idx][i][type_j * aev_params.radial_sublength + ishfr], GmR); } } // every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ> __global__ void cuRadialAEVs_backward( torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t, torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> grad_output, torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> grad_radial_dist, const PairDist<DataT>* d_Rij, AEVScalarParams<DataT, int> aev_params, int nRadialRij) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int idx = gidx / THREADS_PER_RIJ; int nShfR = ShfR_t.size(0); DataT EtaR = EtaR_t[0]; if (idx >= nRadialRij) return; int laneIdx = threadIdx.x % THREADS_PER_RIJ; PairDist<DataT> d = d_Rij[idx]; DataT Rij = d.Rij; int mol_idx = d.midx; int i = d.i; int j = d.j; SpeciesT type_j = species_t[mol_idx][j]; DataT fc = 0.5 * cos(PI * Rij / aev_params.Rcr) + 0.5; DataT fc_grad = -0.5 * (PI / aev_params.Rcr) * sin(PI * Rij / aev_params.Rcr); for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) { DataT ShfR = ShfR_t[ishfr]; DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)); DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR; DataT grad_output_item = grad_output[mol_idx][i][type_j * aev_params.radial_sublength + ishfr]; DataT grad_radial_dist_item = grad_output_item * (GmR_grad * fc + GmR * 
fc_grad);
    atomicAdd(&grad_radial_dist[idx], grad_radial_dist_item);
  }
}

template <typename DataT>
void cubScan(const DataT* d_in, DataT* d_out, int num_items, cudaStream_t stream) {
  auto& allocator = *c10::cuda::CUDACachingAllocator::get();

  // Determine temporary device storage requirements
  void* d_temp_storage = NULL;
  size_t temp_storage_bytes = 0;
  cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);

  // Allocate temporary storage
  auto buffer_tmp = allocator.allocate(temp_storage_bytes);
  d_temp_storage = buffer_tmp.get();

  // Run exclusive prefix sum
  cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
}

template <typename DataT, typename IndexT>
int cubEncode(
    const DataT* d_in,
    DataT* d_unique_out,
    IndexT* d_counts_out,
    int num_items,
    int* d_num_runs_out,
    cudaStream_t stream) {
  auto& allocator = *c10::cuda::CUDACachingAllocator::get();

  // Determine temporary device storage requirements
  void* d_temp_storage = NULL;
  size_t temp_storage_bytes = 0;
  cub::DeviceRunLengthEncode::Encode(
      d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);

  // Allocate temporary storage
  auto buffer_tmp = allocator.allocate(temp_storage_bytes);
  d_temp_storage = buffer_tmp.get();

  // Run encoding
  cub::DeviceRunLengthEncode::Encode(
      d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);

  int num_selected = 0;
  cudaMemcpyAsync(&num_selected, d_num_runs_out, sizeof(int), cudaMemcpyDefault, stream);
  cudaStreamSynchronize(stream);
  return num_selected;
}

template <typename DataT, typename LambdaOpT>
int cubDeviceSelect(
    const DataT* d_in,
    DataT* d_out,
    int num_items,
    int* d_num_selected_out,
    LambdaOpT select_op,
    cudaStream_t stream) {
  auto& allocator = *c10::cuda::CUDACachingAllocator::get();

  // Determine temporary device storage requirements
  void* d_temp_storage = NULL;
  size_t temp_storage_bytes = 0;
  cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op);

  // Allocate temporary storage
  auto buffer_tmp = allocator.allocate(temp_storage_bytes);
  d_temp_storage = buffer_tmp.get();

  // Run selection
  cub::DeviceSelect::If(
      d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream);

  int num_selected = 0;
  cudaMemcpyAsync(&num_selected, d_num_selected_out, sizeof(int), cudaMemcpyDefault, stream);
  cudaStreamSynchronize(stream);
  return num_selected;
}

template <typename DataT>
DataT cubMax(const DataT* d_in, int num_items, DataT* d_out, cudaStream_t stream) {
  auto& allocator = *c10::cuda::CUDACachingAllocator::get();

  // Determine temporary device storage requirements
  void* d_temp_storage = NULL;
  size_t temp_storage_bytes = 0;
  cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);

  // Allocate temporary storage
  auto buffer_tmp = allocator.allocate(temp_storage_bytes);
  d_temp_storage = buffer_tmp.get();

  // Run max-reduction
  cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);

  // copy the reduced value back to the host; declared as DataT so the copy size matches sizeof(DataT)
  DataT maxVal = 0;
  cudaMemcpyAsync(&maxVal, d_out, sizeof(DataT), cudaMemcpyDefault, stream);
  cudaStreamSynchronize(stream);
  return maxVal;
}

void initConsts(AEVScalarParams<float>& aev_params, cudaStream_t stream) {
  int num_species = aev_params.num_species;
  assert(num_species <= MAX_NSPECIES);
  // precompute the aev offsets and load to constant memory
  int* subaev_offsets = new int[num_species * num_species];
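  // The nested loop below fills a symmetric lookup table: entry (s, s + t) and its
  // mirror (s + t, s) both receive angular_sublength * k, where k is the
  // upper-triangular index of the unordered species pair; a small host-side
  // sketch of that index mapping follows this file.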
for (int t = 0; t < num_species; ++t) { int offset = 0; for (int s = 0; s < num_species; s++) { if (t < num_species - s) { subaev_offsets[s * num_species + s + t] = aev_params.angular_sublength * (offset + t); subaev_offsets[(s + t) * num_species + s] = aev_params.angular_sublength * (offset + t); } offset += num_species - s; } } cudaMemcpyToSymbolAsync( csubaev_offsets, subaev_offsets, sizeof(int) * num_species * num_species, 0, cudaMemcpyDefault, stream); delete[] subaev_offsets; } struct Result { Tensor aev_t; AEVScalarParams<float> aev_params; Tensor tensor_Rij; Tensor tensor_radialRij; Tensor tensor_angularRij; int total_natom_pairs; int nRadialRij; int nAngularRij; Tensor tensor_centralAtom; Tensor tensor_numPairsPerCenterAtom; Tensor tensor_centerAtomStartIdx; int maxnbrs_per_atom_aligned; int angular_length_aligned; int ncenter_atoms; }; // NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1 template <typename ScalarRealT = float> Result cuaev_forward( const Tensor& coordinates_t, const Tensor& species_t, double Rcr_, double Rca_, const Tensor& EtaR_t, const Tensor& ShfR_t, const Tensor& EtaA_t, const Tensor& Zeta_t, const Tensor& ShfA_t, const Tensor& ShfZ_t, int64_t num_species_) { TORCH_CHECK( (species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type"); TORCH_CHECK( EtaR_t.size(0) == 1 || EtaA_t.size(0) == 1 || Zeta_t.size(0) == 1, "cuda extension is currently not supported for the specified " "configuration"); ScalarRealT Rcr = Rcr_; ScalarRealT Rca = Rca_; int num_species = num_species_; const int n_molecules = species_t.size(0); const int max_natoms_per_mol = species_t.size(1); AEVScalarParams<float> aev_params; aev_params.Rca = Rca; aev_params.Rcr = Rcr; aev_params.num_species = num_species; aev_params.radial_sublength = EtaR_t.size(0) * ShfR_t.size(0); aev_params.radial_length = aev_params.radial_sublength * num_species; aev_params.angular_sublength = EtaA_t.size(0) * Zeta_t.size(0) * ShfA_t.size(0) * ShfZ_t.size(0); aev_params.angular_length = aev_params.angular_sublength * (num_species * (num_species + 1) / 2); int aev_length = aev_params.radial_length + aev_params.angular_length; auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options()); if (species_t.numel() == 0) { return {aev_t, aev_params, Tensor(), Tensor(), Tensor(), 0, 0, 0}; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto thrust_allocator = THCThrustAllocator(at::globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(thrust_allocator).on(stream); auto& allocator = *c10::cuda::CUDACachingAllocator::get(); // precompute the aev offsets and load to constand memory initConsts(aev_params, stream); // buffer to store all the pairwise distance (Rij) auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol; auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device()); Tensor tensor_Rij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options); PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr(); // init all Rij to inf PairDist<float> init; init.Rij = std::numeric_limits<float>::infinity(); thrust::fill(policy, d_Rij, d_Rij + total_natom_pairs, init); // buffer to store all the pairwise distance that is needed for Radial AEV // computation Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options); PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr(); auto buffer_count = 
allocator.allocate(sizeof(int));
  int* d_count_out = (int*)buffer_count.get();

  const int block_size = 64;
  dim3 block(8, 8, 1);

  if (n_molecules == 1) {
    int tileWidth = 32;
    int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
    dim3 block(tileWidth, tileWidth, 1);
    dim3 grid(tilesPerRow, tilesPerRow, 1);
    pairwiseDistanceSingleMolecule<<<grid, block, 0, stream>>>(
        species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
        coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
        d_Rij,
        max_natoms_per_mol);
  } else {
    dim3 block(8, 8, 1);
    // Compute pairwise distance (Rij) for all atom pairs in a molecule
    // maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
    // TODO: the kernel is not optimized for batches of huge molecules (max_natoms_per_mol > 1000)
    pairwiseDistance<<<n_molecules, block, sizeof(float) * max_natoms_per_mol * 3, stream>>>(
        species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
        coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
        d_Rij,
        max_natoms_per_mol);
  }

  // Extract Rijs that are needed for RadialAEV computation, i.e. all the Rij <= Rcr
  int nRadialRij = cubDeviceSelect(
      d_Rij,
      d_radialRij,
      total_natom_pairs,
      d_count_out,
      [=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
      stream);

  int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
  cuRadialAEVs<int, float, 8><<<nblocks, block_size, 0, stream>>>(
      species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
      ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
      EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
      aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
      d_radialRij,
      aev_params,
      nRadialRij);

  // reuse buffer allocated for all Rij
  // d_angularRij will store all the Rij required in Angular AEV computation
  Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
  PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();

  // Extract Rijs that are needed for AngularAEV computation, i.e.
all the Rij // <= Rca int nAngularRij = cubDeviceSelect( d_radialRij, d_angularRij, nRadialRij, d_count_out, [=] __device__(const PairDist<float> d) { return d.Rij <= Rca; }, stream); Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options); PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr(); Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options); int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr(); // group by center atom int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream); Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options); int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr(); cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream); { const int nthreads_per_catom = 32; const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size; auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) { int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4 int sxyz = sizeof(float) * max_nbrs * 3; int sRij = sizeof(float) * max_nbrs; int sfc = sizeof(float) * max_nbrs; int sj = sizeof(int) * max_nbrs; return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb; }; int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream); int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom); int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom); int angular_length_aligned = align<4>(aev_params.angular_length); cuAngularAEVs<<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>( species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, d_centerAtomStartIdx, aev_params, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return {aev_t, aev_params, tensor_Rij, tensor_radialRij, tensor_angularRij, total_natom_pairs, nRadialRij, nAngularRij, tensor_centralAtom, tensor_numPairsPerCenterAtom, tensor_centerAtomStartIdx, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms}; } } Tensor cuaev_backward( const Tensor& grad_output, const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams<float>& aev_params, const Tensor& EtaR_t, const Tensor& ShfR_t, const Tensor& EtaA_t, const Tensor& Zeta_t, const Tensor& ShfA_t, const Tensor& ShfZ_t, const Tensor& tensor_Rij, int total_natom_pairs, const Tensor& tensor_radialRij, int nRadialRij, const Tensor& tensor_angularRij, int nAngularRij, const Tensor& tensor_centralAtom, const Tensor& tensor_numPairsPerCenterAtom, const Tensor& tensor_centerAtomStartIdx, int maxnbrs_per_atom_aligned, int angular_length_aligned, int ncenter_atoms) { using namespace torch::indexing; const int n_molecules = coordinates_t.size(0); const int max_natoms_per_mol = coordinates_t.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto grad_coord = torch::zeros(coordinates_t.sizes(), 
coordinates_t.options().requires_grad(false)); // [2, 5, 3] auto grad_output_radial = grad_output.index({Ellipsis, Slice(None, aev_params.radial_length)}); // [2, 5, 64] auto grad_output_angular = grad_output.index({Ellipsis, Slice(aev_params.radial_length, None)}); // [2, 5, 320] PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr(); PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr(); PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr(); PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr(); int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr(); int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr(); Tensor grad_radial_dist = torch::zeros(nRadialRij, coordinates_t.options().requires_grad(false)); int block_size = 64; int nblocks = (nRadialRij * 8 + block_size - 1) / block_size; cuRadialAEVs_backward<int, float, 8><<<nblocks, block_size, 0, stream>>>( species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), d_radialRij, aev_params, nRadialRij); // For best result, block_size should match average molecule size (no padding) to avoid atomicAdd nblocks = (nRadialRij + block_size - 1) / block_size; pairwiseDistance_backward<<<nblocks, block_size, 0, stream>>>( coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_radialRij, nRadialRij); auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) { int sxyz = sizeof(float) * max_nbrs * 3; int sj_xyz_grad = sizeof(float) * max_nbrs * 3; int sRij = sizeof(float) * max_nbrs; int sfc = sizeof(float) * max_nbrs; int sfc_grad = sizeof(float) * max_nbrs; int sj = sizeof(int) * max_nbrs; return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb; }; block_size = 32; const int nthreads_per_catom = 32; const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size; int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom); Tensor grad_angular_coord = torch::zeros({nAngularRij, 3}, coordinates_t.options().requires_grad(false)); cuAngularAEVs_backward<<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>( species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(), grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(), d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, d_centerAtomStartIdx, aev_params, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return grad_coord; } #define AEV_INPUT \ const Tensor &coordinates_t, const Tensor &species_t, double Rcr_, double Rca_, const Tensor &EtaR_t, \ const Tensor &ShfR_t, const Tensor &EtaA_t, const Tensor &Zeta_t, const Tensor &ShfA_t, const 
Tensor &ShfZ_t, \ int64_t num_species_ Tensor cuaev_cuda(AEV_INPUT) { Result res = cuaev_forward<float>( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); return res.aev_t; } class CuaevAutograd : public torch::autograd::Function<CuaevAutograd> { public: static Tensor forward(torch::autograd::AutogradContext* ctx, AEV_INPUT) { at::AutoNonVariableTypeMode g; Result res = cuaev_forward<float>( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); if (coordinates_t.requires_grad()) { ctx->save_for_backward({coordinates_t, species_t, res.tensor_Rij, res.tensor_radialRij, res.tensor_angularRij, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, res.tensor_centralAtom, res.tensor_numPairsPerCenterAtom, res.tensor_centerAtomStartIdx}); ctx->saved_data["aev_params"] = res.aev_params; ctx->saved_data["int_list"] = c10::List<int64_t>{res.total_natom_pairs, res.nRadialRij, res.nAngularRij, res.maxnbrs_per_atom_aligned, res.angular_length_aligned, res.ncenter_atoms}; } return res.aev_t; } static tensor_list backward(torch::autograd::AutogradContext* ctx, tensor_list grad_outputs) { auto saved = ctx->get_saved_variables(); auto coordinates_t = saved[0], species_t = saved[1]; auto tensor_Rij = saved[2], tensor_radialRij = saved[3], tensor_angularRij = saved[4]; auto EtaR_t = saved[5], ShfR_t = saved[6], EtaA_t = saved[7], Zeta_t = saved[8], ShfA_t = saved[9], ShfZ_t = saved[10]; auto tensor_centralAtom = saved[11], tensor_numPairsPerCenterAtom = saved[12], tensor_centerAtomStartIdx = saved[13]; AEVScalarParams<float> aev_params(ctx->saved_data["aev_params"]); c10::List<int64_t> int_list = ctx->saved_data["int_list"].toIntList(); int total_natom_pairs = int_list[0], nRadialRij = int_list[1], nAngularRij = int_list[2]; int maxnbrs_per_atom_aligned = int_list[3], angular_length_aligned = int_list[4]; int ncenter_atoms = int_list[5]; Tensor grad_coord = cuaev_backward( grad_outputs[0], coordinates_t, species_t, aev_params, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, tensor_Rij, total_natom_pairs, tensor_radialRij, nRadialRij, tensor_angularRij, nAngularRij, tensor_centralAtom, tensor_numPairsPerCenterAtom, tensor_centerAtomStartIdx, maxnbrs_per_atom_aligned, angular_length_aligned, ncenter_atoms); return { grad_coord, Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor(), Tensor()}; } }; Tensor cuaev_autograd(AEV_INPUT) { return CuaevAutograd::apply( coordinates_t, species_t, Rcr_, Rca_, EtaR_t, ShfR_t, EtaA_t, Zeta_t, ShfA_t, ShfZ_t, num_species_); } TORCH_LIBRARY(cuaev, m) { m.def("cuComputeAEV", cuaev_cuda); } TORCH_LIBRARY_IMPL(cuaev, CUDA, m) { m.impl("cuComputeAEV", cuaev_cuda); } TORCH_LIBRARY_IMPL(cuaev, Autograd, m) { m.impl("cuComputeAEV", cuaev_autograd); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {}
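For reference, the species-pair block layout that initConsts precomputes into csubaev_offsets can be reproduced on the host. The snippet below is a standalone sketch of that num_species * (num_species + 1) / 2 layout; the helper name pair_index and the printed example are illustrative and not part of the extension.

#include <cstdio>

// Upper-triangular index of the unordered species pair (a, b); initConsts stores
// angular_sublength * pair_index(a, b, num_species) at csubaev_offsets[a * num_species + b]
// and at the mirrored entry [b * num_species + a].
int pair_index(int a, int b, int num_species) {
  if (a > b) { int tmp = a; a = b; b = tmp; }            // the pair is unordered
  int rows_before = a * num_species - a * (a - 1) / 2;   // pairs whose smaller species is < a
  return rows_before + (b - a);
}

int main() {
  const int num_species = 4; // e.g. an H/C/N/O model
  for (int a = 0; a < num_species; ++a)
    for (int b = a; b < num_species; ++b)
      std::printf("pair (%d,%d) -> angular block %d\n", a, b, pair_index(a, b, num_species));
  // Within the full AEV, block k starts at radial_length + k * angular_sublength.
  return 0;
}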
843055842dd7d1b131b7657386de131282c6edde.hip
// !!! This is a file automatically generated by hipify!!! #include "debug_plugin.h" #include <cassert> #include <string> #include <vector> #include "NvInfer.h" #include "debug_kernel.h" #include "serialize.hpp" using namespace nvinfer1; using namespace std; namespace debug_plugin { // Clip plugin specific constants namespace { static const char* DEBUG_VERSION{"1"}; static const char* DEBUG_NAME{"DebugPlugin"}; } // namespace /*REGISTER_TENSORRT_PLUGIN(DebugPluginCreator);*/ DebugPlugin::DebugPlugin(const std::string &name, const DataType data_type, int input_num, std::vector<nvinfer1::Dims> outputs_dims) : layer_name_(name) , data_type_(data_type) , num_inputs_(input_num) , outputs_dims_(outputs_dims) { } DebugPlugin::DebugPlugin(const std::string &name, const void* data, size_t length) : layer_name_(name) { deserialize_value(&data, &length, &data_type_); deserialize_value(&data, &length, &num_inputs_); size_t name_len = 0; deserialize_value(&data, &length, &name_len); // deserialize dims size_t outputs_dims_size = 0; deserialize_value(&data, &length, &outputs_dims_size); outputs_dims_.resize(outputs_dims_size); const char *d = static_cast<const char*>(data); for (int i = 0; i < outputs_dims_size; i++) { deserNvDimsToHost(d, outputs_dims_[i]); } char tmp[name_len]; deserToHost(d, tmp, name_len); layer_name_.resize(name_len); layer_name_ = std::string(tmp); gLogVerbose << "Starting to deserialize DEBUG plugin: " << layer_name_ << std::endl; } IPluginV2Ext* DebugPlugin::clone() const { auto p = new DebugPlugin(layer_name_, data_type_, num_inputs_, outputs_dims_); return p; } Dims DebugPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { outputs_dims_.push_back(inputs[index]); return inputs[index]; } bool DebugPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const { return true; } void DebugPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } size_t DebugPlugin::getWorkspaceSize(int maxBatchSize) const { return 0; } int DebugPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { for (size_t n = 0; n < num_inputs_; n++) { auto dims = outputs_dims_[n]; const int inputVolume = volume(dims) * batchSize; // remove dim = 1 or 0 vector<int> v_dims; v_dims.push_back(batchSize); for (int i = 0; i < dims.nbDims; i++) { int d = dims.d[i]; if (d > 1) v_dims.push_back(d); } if (data_type_ == DataType::kFLOAT) { const float* input = static_cast<const float*>(inputs[n]); float *arr = new float[inputVolume]; memset(arr, 0, inputVolume*sizeof(float)); hipMemcpy(arr, input, inputVolume*sizeof(float), hipMemcpyDeviceToHost); printf("layer_name=%s, dims=%s\n", layer_name_.c_str(), dims2String(dims).c_str()); p(arr, v_dims); delete [] arr; float* output = static_cast<float*>(outputs[n]); hipMemcpy(output, input, inputVolume*sizeof(float), hipMemcpyDeviceToDevice); } else if (data_type_ == DataType::kHALF) { #ifdef __SCORE_HALF__ const half* input = static_cast<const half*>(inputs[0]); #endif } else { assert(false); } } return 0; } // IPluginV2Ext Methods DataType DebugPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); return inputTypes[0]; } const char* DebugPlugin::getPluginType() const { return DEBUG_NAME; } const char* DebugPlugin::getPluginVersion() const { return DEBUG_VERSION; } int 
DebugPlugin::getNbOutputs() const { return num_inputs_; } int DebugPlugin::initialize() { return 0; } void DebugPlugin::terminate() { } size_t DebugPlugin::getSerializationSize() const { return sizeof(data_type_) + sizeof(num_inputs_) + sizeof(int) * outputs_dims_.size() * (nvinfer1::Dims::MAX_DIMS+ 1) + sizeof(layer_name_.size()) + layer_name_.size() + 10; } void DebugPlugin::serialize(void* buffer) const { serialize_value(&buffer, data_type_); serialize_value(&buffer, num_inputs_); serialize_value(&buffer, layer_name_.size()); serialize_value(&buffer, outputs_dims_.size()); char *d = static_cast<char*>(buffer); for (size_t i = 0; i < outputs_dims_.size(); i++) { serNvDimsFromHost(d, outputs_dims_[i]); } serFromHost(d, layer_name_, (size_t)layer_name_.size()); } void DebugPlugin::destroy() { delete this; } void DebugPlugin::setPluginNamespace(const char* libNamespace) { namespace_ = libNamespace; } const char* DebugPlugin::getPluginNamespace() const { return namespace_.c_str(); } bool DebugPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } bool DebugPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } const char* DebugPluginCreator::getPluginName() const { return DEBUG_NAME; } const char* DebugPluginCreator::getPluginVersion() const { return DEBUG_VERSION; } const PluginFieldCollection* DebugPluginCreator::getFieldNames() { return &field_collection_; } IPluginV2* DebugPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { gLogVerbose << "Creating DebugPlugin...\n"; int typeId = -1; int input_num = 0; for (int i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("type_id") == 0) { typeId = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building typeId: " << typeId << std::endl; } if (field_name.compare("input_num") == 0) { input_num = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building input_num: " << input_num << std::endl; } } if (typeId < 0 || typeId > 2) { gLogError << "DEBUG: invalid typeId " << typeId << std::endl; return nullptr; } DataType type = static_cast<DataType>(typeId); gLogVerbose << "Creating DebugPlugin...\n"; return new DebugPlugin(name, type, input_num); } IPluginV2* DebugPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { return new DebugPlugin(name, serialData, serialLength); } void DebugPluginCreator::setPluginNamespace(const char* libNamespace) { namespace_ = libNamespace; } const char* DebugPluginCreator::getPluginNamespace() const { return namespace_.c_str(); } } // debug_plugin
843055842dd7d1b131b7657386de131282c6edde.cu
#include "debug_plugin.h" #include <cassert> #include <string> #include <vector> #include "NvInfer.h" #include "debug_kernel.h" #include "serialize.hpp" using namespace nvinfer1; using namespace std; namespace debug_plugin { // Clip plugin specific constants namespace { static const char* DEBUG_VERSION{"1"}; static const char* DEBUG_NAME{"DebugPlugin"}; } // namespace /*REGISTER_TENSORRT_PLUGIN(DebugPluginCreator);*/ DebugPlugin::DebugPlugin(const std::string &name, const DataType data_type, int input_num, std::vector<nvinfer1::Dims> outputs_dims) : layer_name_(name) , data_type_(data_type) , num_inputs_(input_num) , outputs_dims_(outputs_dims) { } DebugPlugin::DebugPlugin(const std::string &name, const void* data, size_t length) : layer_name_(name) { deserialize_value(&data, &length, &data_type_); deserialize_value(&data, &length, &num_inputs_); size_t name_len = 0; deserialize_value(&data, &length, &name_len); // deserialize dims size_t outputs_dims_size = 0; deserialize_value(&data, &length, &outputs_dims_size); outputs_dims_.resize(outputs_dims_size); const char *d = static_cast<const char*>(data); for (int i = 0; i < outputs_dims_size; i++) { deserNvDimsToHost(d, outputs_dims_[i]); } char tmp[name_len]; deserToHost(d, tmp, name_len); layer_name_.resize(name_len); layer_name_ = std::string(tmp); gLogVerbose << "Starting to deserialize DEBUG plugin: " << layer_name_ << std::endl; } IPluginV2Ext* DebugPlugin::clone() const { auto p = new DebugPlugin(layer_name_, data_type_, num_inputs_, outputs_dims_); return p; } Dims DebugPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { outputs_dims_.push_back(inputs[index]); return inputs[index]; } bool DebugPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const { return true; } void DebugPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } size_t DebugPlugin::getWorkspaceSize(int maxBatchSize) const { return 0; } int DebugPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { for (size_t n = 0; n < num_inputs_; n++) { auto dims = outputs_dims_[n]; const int inputVolume = volume(dims) * batchSize; // remove dim = 1 or 0 vector<int> v_dims; v_dims.push_back(batchSize); for (int i = 0; i < dims.nbDims; i++) { int d = dims.d[i]; if (d > 1) v_dims.push_back(d); } if (data_type_ == DataType::kFLOAT) { const float* input = static_cast<const float*>(inputs[n]); float *arr = new float[inputVolume]; memset(arr, 0, inputVolume*sizeof(float)); cudaMemcpy(arr, input, inputVolume*sizeof(float), cudaMemcpyDeviceToHost); printf("layer_name=%s, dims=%s\n", layer_name_.c_str(), dims2String(dims).c_str()); p(arr, v_dims); delete [] arr; float* output = static_cast<float*>(outputs[n]); cudaMemcpy(output, input, inputVolume*sizeof(float), cudaMemcpyDeviceToDevice); } else if (data_type_ == DataType::kHALF) { #ifdef __SCORE_HALF__ const half* input = static_cast<const half*>(inputs[0]); #endif } else { assert(false); } } return 0; } // IPluginV2Ext Methods DataType DebugPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); return inputTypes[0]; } const char* DebugPlugin::getPluginType() const { return DEBUG_NAME; } const char* DebugPlugin::getPluginVersion() const { return DEBUG_VERSION; } int DebugPlugin::getNbOutputs() const { return num_inputs_; } int 
DebugPlugin::initialize() { return 0; } void DebugPlugin::terminate() { } size_t DebugPlugin::getSerializationSize() const { return sizeof(data_type_) + sizeof(num_inputs_) + sizeof(int) * outputs_dims_.size() * (nvinfer1::Dims::MAX_DIMS+ 1) + sizeof(layer_name_.size()) + layer_name_.size() + 10; } void DebugPlugin::serialize(void* buffer) const { serialize_value(&buffer, data_type_); serialize_value(&buffer, num_inputs_); serialize_value(&buffer, layer_name_.size()); serialize_value(&buffer, outputs_dims_.size()); char *d = static_cast<char*>(buffer); for (size_t i = 0; i < outputs_dims_.size(); i++) { serNvDimsFromHost(d, outputs_dims_[i]); } serFromHost(d, layer_name_, (size_t)layer_name_.size()); } void DebugPlugin::destroy() { delete this; } void DebugPlugin::setPluginNamespace(const char* libNamespace) { namespace_ = libNamespace; } const char* DebugPlugin::getPluginNamespace() const { return namespace_.c_str(); } bool DebugPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } bool DebugPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } const char* DebugPluginCreator::getPluginName() const { return DEBUG_NAME; } const char* DebugPluginCreator::getPluginVersion() const { return DEBUG_VERSION; } const PluginFieldCollection* DebugPluginCreator::getFieldNames() { return &field_collection_; } IPluginV2* DebugPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { gLogVerbose << "Creating DebugPlugin...\n"; int typeId = -1; int input_num = 0; for (int i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("type_id") == 0) { typeId = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building typeId: " << typeId << std::endl; } if (field_name.compare("input_num") == 0) { input_num = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building input_num: " << input_num << std::endl; } } if (typeId < 0 || typeId > 2) { gLogError << "DEBUG: invalid typeId " << typeId << std::endl; return nullptr; } DataType type = static_cast<DataType>(typeId); gLogVerbose << "Creating DebugPlugin...\n"; return new DebugPlugin(name, type, input_num); } IPluginV2* DebugPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { return new DebugPlugin(name, serialData, serialLength); } void DebugPluginCreator::setPluginNamespace(const char* libNamespace) { namespace_ = libNamespace; } const char* DebugPluginCreator::getPluginNamespace() const { return namespace_.c_str(); } } // debug_plugin
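The plugin's getSerializationSize / serialize pair above has to report and write exactly the same byte layout, or deserialization will read garbage. A minimal self-contained sketch of that contract for plain POD fields; the helper write_pod and the struct ToyPluginState are illustrative stand-ins, not the repository's serialize_value API.

#include <cstring>
#include <cstdint>
#include <vector>
#include <cassert>

// Hypothetical helper: append a POD value to a byte buffer, advancing the cursor.
template <typename T>
void write_pod(char*& cursor, const T& value) {
  std::memcpy(cursor, &value, sizeof(T));
  cursor += sizeof(T);
}

struct ToyPluginState {
  int32_t data_type;
  int32_t num_inputs;

  // Must match serialize() byte-for-byte, the same invariant DebugPlugin relies on.
  size_t serializationSize() const { return sizeof(data_type) + sizeof(num_inputs); }

  void serialize(void* buffer) const {
    char* cursor = static_cast<char*>(buffer);
    write_pod(cursor, data_type);
    write_pod(cursor, num_inputs);
  }
};

int main() {
  ToyPluginState s{1, 3};
  std::vector<char> buf(s.serializationSize());
  s.serialize(buf.data());
  assert(buf.size() == sizeof(int32_t) * 2);
  return 0;
}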
b5a6694388601308cc32f55adbc0eb715c708496.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THHUNN/common.h> #include <THH/THHDeviceTensor.cuh> #include <THH/THHDeviceTensorUtils.cuh> #include <THH/THHDeviceUtils.cuh> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> #include <THH/THHAtomics.cuh> #include <cfloat> template <typename Dtype, typename Acctype> __device__ inline int getInterval(Acctype sample, int index, int inputSize, int outputSize, int poolSize) { Acctype alpha = (Acctype)(inputSize - poolSize) / (Acctype) (outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return (int) ((index + sample) * alpha) - (int) (sample * alpha); } } // We template on poolSizeW to allow the innermost loop to be unrolled template <int PoolSizeTStatic, typename Dtype, typename Acctype> __global__ void VolumetricFractionalMaxPooling_updateOutput( THCDeviceTensor<Dtype, 5> input, THCDeviceTensor<Dtype, 5> output, THCDeviceTensor<THCIndex_t, 5> indices, THCDeviceTensor<Dtype, 3> samples, int poolSizeT, int poolSizeW, int poolSizeH) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.getSize(2) * output.getSize(3) * output.getSize(4)){ int outputT = ourOutputPoint % output.getSize(4); int outputW = (ourOutputPoint / output.getSize(4)) % output.getSize(3); int outputH = ourOutputPoint / (output.getSize(3)*output.getSize(4)); int poolT = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][0]), outputT, input.getSize(4), output.getSize(4), poolSizeT); int poolW = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][1]), outputW, input.getSize(3), output.getSize(3), poolSizeW); int poolH = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][2]), outputH, input.getSize(2), output.getSize(2), poolSizeH); Dtype maxVal = THCNumerics<Dtype>::min(); int maxIndex = -1; for (int h = poolH; h < poolH + poolSizeH; ++h) { for (int w = poolW; w < poolW + poolSizeW; ++w) { if (PoolSizeTStatic == -1) { for (int t = poolT; t < poolT + poolSizeT; ++t) { Dtype val = input[batch][plane][h][w][t]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = h * input.getSize(3)*input.getSize(4) + w * input.getSize(4) + t; maxVal = val; } } } else { #pragma unroll for (int i = 0; i < PoolSizeTStatic; ++i) { int t = i + poolT; Dtype val = input[batch][plane][h][w][t]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = h * input.getSize(3)*input.getSize(4) + w * input.getSize(4) + t; maxVal = val; } } } } } assert(THCNumerics<Dtype>::ne(maxVal, THCNumerics<Dtype>::min())); assert(maxIndex != -1); // +1 for Lua index indices[batch][plane][outputH][outputW][outputT] = maxIndex + TH_INDEX_BASE; output[batch][plane][outputH][outputW][outputT] = maxVal; } } template <typename Dtype> __global__ void VolumetricFractionalMaxPooling_updateGradInput( THCDeviceTensor<Dtype, 5> gradInput, THCDeviceTensor<Dtype, 5> gradOutput, THCDeviceTensor<THCIndex_t, 5> indices) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.getSize(2) * 
gradOutput.getSize(3) * gradOutput.getSize(4)) { int outputT = ourOutputPoint % gradOutput.getSize(4); int outputW = (ourOutputPoint / gradOutput.getSize(4)) % gradOutput.getSize(3); int outputH = ourOutputPoint / (gradOutput.getSize(3)*gradOutput.getSize(4)); int index = indices[batch][plane][outputH][outputW][outputT] - TH_INDEX_BASE; assert(index >= 0); int inputT = index % gradInput.getSize(4); int inputW = (index / gradInput.getSize(4)) % gradInput.getSize(3); int inputH = index / (gradInput.getSize(3) * gradInput.getSize(4)); assert(inputH < gradInput.getSize(2)); atomicAdd(gradInput[batch][plane][inputH][inputW][inputT].data(), gradOutput[batch][plane][outputH][outputW][outputT]); } } #include <THHUNN/generic/VolumetricFractionalMaxPooling.hip> #include <THH/THHGenerateFloatTypes.h>
b5a6694388601308cc32f55adbc0eb715c708496.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/common.h> #include <THC/THCDeviceTensor.cuh> #include <THC/THCDeviceTensorUtils.cuh> #include <THC/THCDeviceUtils.cuh> #include <TH/THHalf.h> #include <THCUNN/THCHalfAutoNumerics.cuh> #include <THC/THCAtomics.cuh> #include <cfloat> template <typename Dtype, typename Acctype> __device__ inline int getInterval(Acctype sample, int index, int inputSize, int outputSize, int poolSize) { Acctype alpha = (Acctype)(inputSize - poolSize) / (Acctype) (outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return (int) ((index + sample) * alpha) - (int) (sample * alpha); } } // We template on poolSizeW to allow the innermost loop to be unrolled template <int PoolSizeTStatic, typename Dtype, typename Acctype> __global__ void VolumetricFractionalMaxPooling_updateOutput( THCDeviceTensor<Dtype, 5> input, THCDeviceTensor<Dtype, 5> output, THCDeviceTensor<THCIndex_t, 5> indices, THCDeviceTensor<Dtype, 3> samples, int poolSizeT, int poolSizeW, int poolSizeH) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.getSize(2) * output.getSize(3) * output.getSize(4)){ int outputT = ourOutputPoint % output.getSize(4); int outputW = (ourOutputPoint / output.getSize(4)) % output.getSize(3); int outputH = ourOutputPoint / (output.getSize(3)*output.getSize(4)); int poolT = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][0]), outputT, input.getSize(4), output.getSize(4), poolSizeT); int poolW = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][1]), outputW, input.getSize(3), output.getSize(3), poolSizeW); int poolH = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][2]), outputH, input.getSize(2), output.getSize(2), poolSizeH); Dtype maxVal = THCNumerics<Dtype>::min(); int maxIndex = -1; for (int h = poolH; h < poolH + poolSizeH; ++h) { for (int w = poolW; w < poolW + poolSizeW; ++w) { if (PoolSizeTStatic == -1) { for (int t = poolT; t < poolT + poolSizeT; ++t) { Dtype val = input[batch][plane][h][w][t]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = h * input.getSize(3)*input.getSize(4) + w * input.getSize(4) + t; maxVal = val; } } } else { #pragma unroll for (int i = 0; i < PoolSizeTStatic; ++i) { int t = i + poolT; Dtype val = input[batch][plane][h][w][t]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = h * input.getSize(3)*input.getSize(4) + w * input.getSize(4) + t; maxVal = val; } } } } } assert(THCNumerics<Dtype>::ne(maxVal, THCNumerics<Dtype>::min())); assert(maxIndex != -1); // +1 for Lua index indices[batch][plane][outputH][outputW][outputT] = maxIndex + TH_INDEX_BASE; output[batch][plane][outputH][outputW][outputT] = maxVal; } } template <typename Dtype> __global__ void VolumetricFractionalMaxPooling_updateGradInput( THCDeviceTensor<Dtype, 5> gradInput, THCDeviceTensor<Dtype, 5> gradOutput, THCDeviceTensor<THCIndex_t, 5> indices) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3) * gradOutput.getSize(4)) { int outputT = ourOutputPoint % 
gradOutput.getSize(4); int outputW = (ourOutputPoint / gradOutput.getSize(4)) % gradOutput.getSize(3); int outputH = ourOutputPoint / (gradOutput.getSize(3)*gradOutput.getSize(4)); int index = indices[batch][plane][outputH][outputW][outputT] - TH_INDEX_BASE; assert(index >= 0); int inputT = index % gradInput.getSize(4); int inputW = (index / gradInput.getSize(4)) % gradInput.getSize(3); int inputH = index / (gradInput.getSize(3) * gradInput.getSize(4)); assert(inputH < gradInput.getSize(2)); atomicAdd(gradInput[batch][plane][inputH][inputW][inputT].data(), gradOutput[batch][plane][outputH][outputW][outputT]); } } #include <THCUNN/generic/VolumetricFractionalMaxPooling.cu> #include <THC/THCGenerateFloatTypes.h>
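getInterval above maps an output index to the start of its fractional pooling window: alpha spreads outputSize windows of width poolSize over inputSize, and the per-plane random sample shifts the window boundaries, with the last window pinned to the end of the input. A host-side sketch of the same arithmetic, assuming a small 1-D example (inputSize 10, outputSize 4, poolSize 3) chosen purely for illustration.

#include <cstdio>

// Same arithmetic as the device getInterval above, evaluated on the host.
int getIntervalHost(float sample, int index, int inputSize, int outputSize, int poolSize) {
  float alpha = (float)(inputSize - poolSize) / (float)(outputSize - 1);
  if (index == outputSize - 1)
    return inputSize - poolSize;               // last window is pinned to the end
  return (int)((index + sample) * alpha) - (int)(sample * alpha);
}

int main() {
  const int inputSize = 10, outputSize = 4, poolSize = 3;
  const float sample = 0.37f;                  // stands in for samples[batch][plane][k]
  for (int i = 0; i < outputSize; ++i) {
    int start = getIntervalHost(sample, i, inputSize, outputSize, poolSize);
    printf("output %d pools input [%d, %d)\n", i, start, start + poolSize);
  }
  return 0;
}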
945c976bfde3cb5d7e920f56e65232436b8e09f6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.0.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date February 2016

       @generated from magmablas/zcaxpycp.cu mixed zc -> ds, Tue Feb 9 16:05:27 2016
*/
#include "magma_internal.h"

#define NB 64

// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
dsaxpycp_kernel(
    int m, float *r, double *x,
    const double *b, double *w )
{
    const int i = threadIdx.x + blockIdx.x*NB;
    if ( i < m ) {
        x[i] = MAGMA_D_ADD( x[i], MAGMA_D_MAKE( MAGMA_D_REAL( r[i] ), MAGMA_D_IMAG( r[i] ) ) );
        w[i] = b[i];
    }
}

// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_dsaxpycp_q(
    magma_int_t m,
    magmaFloat_ptr r,
    magmaDouble_ptr x,
    magmaDouble_const_ptr b,
    magmaDouble_ptr w,
    magma_queue_t queue )
{
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    hipLaunchKernelGGL(( dsaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, r, x, b, w );
}
945c976bfde3cb5d7e920f56e65232436b8e09f6.cu
/*
    -- MAGMA (version 2.0.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date February 2016

       @generated from magmablas/zcaxpycp.cu mixed zc -> ds, Tue Feb 9 16:05:27 2016
*/
#include "magma_internal.h"

#define NB 64

// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
dsaxpycp_kernel(
    int m, float *r, double *x,
    const double *b, double *w )
{
    const int i = threadIdx.x + blockIdx.x*NB;
    if ( i < m ) {
        x[i] = MAGMA_D_ADD( x[i], MAGMA_D_MAKE( MAGMA_D_REAL( r[i] ), MAGMA_D_IMAG( r[i] ) ) );
        w[i] = b[i];
    }
}

// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_dsaxpycp_q(
    magma_int_t m,
    magmaFloat_ptr r,
    magmaDouble_ptr x,
    magmaDouble_const_ptr b,
    magmaDouble_ptr w,
    magma_queue_t queue )
{
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    dsaxpycp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, r, x, b, w );
}
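The kernel above does one element per thread: x[i] += (double)r[i] (MAGMA_D_MAKE only keeps the real part for the real-double case) and w[i] = b[i], with the grid sized as ceil(m / NB). Below is a host-side reference of the same update with magma_ceildiv written out explicitly; the names and sizes are illustrative, not MAGMA API.

#include <vector>
#include <cstdio>

// Host reference for dsaxpycp: x += (double)r and w = b, plus the grid-size formula.
int main() {
  const int m = 1000, NB = 64;
  std::vector<float>  r(m, 0.5f);
  std::vector<double> x(m, 1.0), b(m, 2.0), w(m, 0.0);

  for (int i = 0; i < m; ++i) {
    x[i] += (double)r[i];   // mixed-precision axpy with unit alpha
    w[i]  = b[i];           // plain copy
  }

  int grid = (m + NB - 1) / NB;   // what magma_ceildiv(m, NB) computes
  printf("x[0]=%.1f w[0]=%.1f grid=%d blocks of %d threads\n", x[0], w[0], grid, NB);
  return 0;
}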
521623794c55f548e91157480e77f60461043af8.hip
// !!! This is a file automatically generated by hipify!!! #include "solver.h" Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule, double learning_rate, double learning_rate_decay, int num_train, int num_val) { this->model = model; this->X_train = X_train, this->X_val = X_val; this->y_train = y_train, this->y_val = y_val; this->num_epoch = num_epoch; this->update_rule = update_rule; this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay; this->num_train = num_train, this->num_val = num_val; this->num_features = model->input_channels * model->input_h * model->input_w; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); } float Solver::step(int start_X, int start_y) { std::vector<float> t1, t2; return this->step(start_X, start_y, t1, t2); } float Solver::step(int start_X, int start_y, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag) { float temp_loss; // std::cout << "start_X: " << start_X << std::endl; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); // float Salpha = -learning_rate; // double Dalpha = -learning_rate; // if (update_rule == SGD) { // for (int i = 0; i < model->num_layers; i++) { // if (model->layer_type[i] == CONV) { // ConvLayerParams *cur_params = (ConvLayerParams // *)model->params[i]; // int kernel_size = cur_params->C_in * cur_params->C_out * // cur_params->filter_h * cur_params->filter_w; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, // kernel_size, // &Salpha, // (float *)cur_params->dW, // 1, // (float *)cur_params->W, // 1)); // checkCUBLAS(hipblasSaxpy(model->cublas_handle, // cur_params->C_out, // &Salpha, // (float *)cur_params->db, // 1, // (float *)cur_params->b, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, // kernel_size, // &Dalpha, // (double *)cur_params->dW, // 1, // (double *)cur_params->W, // 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, // cur_params->C_out, // &Dalpha, // (double *)cur_params->db, // 1, // (double *)cur_params->b, // 1)); // } // } // else if (model->layer_type[i] == FULLY_CONNECTED) { // FCLayerParams *cur_params = (FCLayerParams // *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_in // * cur_params->C_out, // &Salpha, // (float *)cur_params->dW, // 1, // (float *)cur_params->W, // 1)); // checkCUBLAS(hipblasSaxpy(model->cublas_handle, // cur_params->C_out, // &Salpha, // (float *)cur_params->db, // 1, // (float *)cur_params->b, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_in // * cur_params->C_out, // &Dalpha, // (double *)cur_params->dW, // 1, // (double *)cur_params->W, // 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, // cur_params->C_out, // &Dalpha, // (double *)cur_params->db, // 1, // (double *)cur_params->b, // 1)); // } // } // else if (model->layer_type[i] == BATCHNORM) { // BatchNormLayerParams *cur_params = (BatchNormLayerParams // *)model->params[i]; // if 
(model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dscale, // 1, // (float *)cur_params->scale, // 1)); // checkCUBLAS(hipblasSaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dbias, // 1, // (float *)cur_params->bias, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dscale, // 1, // (double *)cur_params->scale, // 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dbias, // 1, // (double *)cur_params->bias, // 1)); // } // } // } // } checkCudaErrors(hipDeviceSynchronize()); return temp_loss; } void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; int num_val_batches = num_val / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli = 0; checkCudaErrors(hipEventRecord(start, model->stream_compute)); float temp_loss = step(start_sample, j * batch_size); checkCudaErrors(hipEventRecord(stop, model->stream_compute)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); std::cout << "One forward, backward pass time(ms): " << milli << std::endl; loss.push_back(temp_loss); std::cout << "loss: " << temp_loss << std::endl; } int correct_count = 0; for (int j = 0; j < num_val_batches; j++) { int start_sample = j * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); correct_count += temp_correct_count; } val_acc.push_back(correct_count); std::cout << "val_acc: " << val_acc[i] << std::endl; // learning_rate *= learning_rate_decay; // std::cout << "learning_rate: " << learning_rate << std::endl; } learning_rate *= learning_rate_decay; } void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) { int batch_size = model->batch_size; int num_iter = num_samples / batch_size; *num_correct = 0; for (int i = 0; i < num_iter; i++) { int start_sample = i * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); *num_correct = *num_correct + temp_correct_count; } } void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch, std::vector<std::vector<float> > &fwd_vdnn_lag, std::vector<std::vector<float> > &bwd_vdnn_lag) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; checkCudaErrors(hipEventRecord(start)); float milli; std::vector<float> 
cur_fwd_vdnn_lag, cur_bwd_vdnn_lag; float temp_loss = step(start_sample, j * batch_size, cur_fwd_vdnn_lag, cur_bwd_vdnn_lag); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); // std::cout << "One forward, backward pass time(ms): " << milli << // std::endl; fwd_vdnn_lag.push_back(cur_fwd_vdnn_lag); bwd_vdnn_lag.push_back(cur_bwd_vdnn_lag); loss.push_back(temp_loss); time.push_back(milli); // std::cout << "loss: " << temp_loss << std::endl; // for (int i = 0; i < cur_fwd_vdnn_lag.size(); i++) { // std::cout << "fwd_lag " << i << ":" << cur_fwd_vdnn_lag[i] << // std::endl; // } // for (int i = 0; i < cur_bwd_vdnn_lag.size(); i++) { // std::cout << "bwd_lag " << i << ":" << cur_bwd_vdnn_lag[i] << // std::endl; // } } } learning_rate *= learning_rate_decay; }
521623794c55f548e91157480e77f60461043af8.cu
#include "solver.h" Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule, double learning_rate, double learning_rate_decay, int num_train, int num_val) { this->model = model; this->X_train = X_train, this->X_val = X_val; this->y_train = y_train, this->y_val = y_val; this->num_epoch = num_epoch; this->update_rule = update_rule; this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay; this->num_train = num_train, this->num_val = num_val; this->num_features = model->input_channels * model->input_h * model->input_w; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); } float Solver::step(int start_X, int start_y) { std::vector<float> t1, t2; return this->step(start_X, start_y, t1, t2); } float Solver::step(int start_X, int start_y, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag) { float temp_loss; // std::cout << "start_X: " << start_X << std::endl; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); // float Salpha = -learning_rate; // double Dalpha = -learning_rate; // if (update_rule == SGD) { // for (int i = 0; i < model->num_layers; i++) { // if (model->layer_type[i] == CONV) { // ConvLayerParams *cur_params = (ConvLayerParams // *)model->params[i]; // int kernel_size = cur_params->C_in * cur_params->C_out * // cur_params->filter_h * cur_params->filter_w; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(cublasSaxpy(model->cublas_handle, // kernel_size, // &Salpha, // (float *)cur_params->dW, // 1, // (float *)cur_params->W, // 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, // cur_params->C_out, // &Salpha, // (float *)cur_params->db, // 1, // (float *)cur_params->b, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, // kernel_size, // &Dalpha, // (double *)cur_params->dW, // 1, // (double *)cur_params->W, // 1)); // checkCUBLAS(cublasDaxpy(model->cublas_handle, // cur_params->C_out, // &Dalpha, // (double *)cur_params->db, // 1, // (double *)cur_params->b, // 1)); // } // } // else if (model->layer_type[i] == FULLY_CONNECTED) { // FCLayerParams *cur_params = (FCLayerParams // *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_in // * cur_params->C_out, // &Salpha, // (float *)cur_params->dW, // 1, // (float *)cur_params->W, // 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, // cur_params->C_out, // &Salpha, // (float *)cur_params->db, // 1, // (float *)cur_params->b, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_in // * cur_params->C_out, // &Dalpha, // (double *)cur_params->dW, // 1, // (double *)cur_params->W, // 1)); // checkCUBLAS(cublasDaxpy(model->cublas_handle, // cur_params->C_out, // &Dalpha, // (double *)cur_params->db, // 1, // (double *)cur_params->b, // 1)); // } // } // else if (model->layer_type[i] == BATCHNORM) { // BatchNormLayerParams *cur_params = (BatchNormLayerParams // *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // 
checkCUBLAS(cublasSaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dscale, // 1, // (float *)cur_params->scale, // 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dbias, // 1, // (float *)cur_params->bias, // 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dscale, // 1, // (double *)cur_params->scale, // 1)); // checkCUBLAS(cublasDaxpy(model->cublas_handle, // cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dbias, // 1, // (double *)cur_params->bias, // 1)); // } // } // } // } checkCudaErrors(cudaDeviceSynchronize()); return temp_loss; } void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; int num_val_batches = num_val / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli = 0; checkCudaErrors(cudaEventRecord(start, model->stream_compute)); float temp_loss = step(start_sample, j * batch_size); checkCudaErrors(cudaEventRecord(stop, model->stream_compute)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); std::cout << "One forward, backward pass time(ms): " << milli << std::endl; loss.push_back(temp_loss); std::cout << "loss: " << temp_loss << std::endl; } int correct_count = 0; for (int j = 0; j < num_val_batches; j++) { int start_sample = j * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); correct_count += temp_correct_count; } val_acc.push_back(correct_count); std::cout << "val_acc: " << val_acc[i] << std::endl; // learning_rate *= learning_rate_decay; // std::cout << "learning_rate: " << learning_rate << std::endl; } learning_rate *= learning_rate_decay; } void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) { int batch_size = model->batch_size; int num_iter = num_samples / batch_size; *num_correct = 0; for (int i = 0; i < num_iter; i++) { int start_sample = i * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); *num_correct = *num_correct + temp_correct_count; } } void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch, std::vector<std::vector<float> > &fwd_vdnn_lag, std::vector<std::vector<float> > &bwd_vdnn_lag) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; checkCudaErrors(cudaEventRecord(start)); float milli; std::vector<float> cur_fwd_vdnn_lag, cur_bwd_vdnn_lag; float 
temp_loss = step(start_sample, j * batch_size, cur_fwd_vdnn_lag, cur_bwd_vdnn_lag); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); // std::cout << "One forward, backward pass time(ms): " << milli << // std::endl; fwd_vdnn_lag.push_back(cur_fwd_vdnn_lag); bwd_vdnn_lag.push_back(cur_bwd_vdnn_lag); loss.push_back(temp_loss); time.push_back(milli); // std::cout << "loss: " << temp_loss << std::endl; // for (int i = 0; i < cur_fwd_vdnn_lag.size(); i++) { // std::cout << "fwd_lag " << i << ":" << cur_fwd_vdnn_lag[i] << // std::endl; // } // for (int i = 0; i < cur_bwd_vdnn_lag.size(); i++) { // std::cout << "bwd_lag " << i << ":" << cur_bwd_vdnn_lag[i] << // std::endl; // } } } learning_rate *= learning_rate_decay; }
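Solver::train and Solver::getTrainTime above time each step by recording CUDA events around the work and reading the elapsed time after synchronizing on the stop event. A minimal standalone sketch of that timing pattern; the kernel busy_kernel and the problem size are hypothetical.

#include <cuda_runtime.h>
#include <cstdio>

// Placeholder workload so there is something to time.
__global__ void busy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                       // enqueue 'start' before the work
  busy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(stop);                        // enqueue 'stop' after the work
  cudaEventSynchronize(stop);                   // wait until 'stop' has actually happened

  float milli = 0.0f;
  cudaEventElapsedTime(&milli, start, stop);    // elapsed GPU time in milliseconds
  printf("kernel time: %f ms\n", milli);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}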
070e1b013f59a80748e11579c582ce3856781f0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include "THHTensor.hpp" #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" /* * Description: */ __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(Dtype *input, Dtype *output, int64_t no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } /* * Description: */ template <typename Dtype, typename Acctype> __global__ void downscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; Acctype sum = Acctype(0); for (int i=0; i < scale_factor; i++){ for(int j=0; j < scale_factor; j++){ int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); sum += gradOutput_data[ipidx]; } } gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum); } #include "generic/SpatialUpSamplingNearest.cu" #include "THHGenerateFloatTypes.h"
070e1b013f59a80748e11579c582ce3856781f0b.cu
#include "THCUNN.h" #include "common.h" #include "THCTensor.hpp" #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" /* * Description: */ __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(Dtype *input, Dtype *output, int64_t no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } /* * Description: */ template <typename Dtype, typename Acctype> __global__ void downscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: int64_t ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; Acctype sum = Acctype(0); for (int i=0; i < scale_factor; i++){ for(int j=0; j < scale_factor; j++){ int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); sum += gradOutput_data[ipidx]; } } gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum); } #include "generic/SpatialUpSamplingNearest.cu" #include "THCGenerateFloatTypes.h"
c989b080a69b92c8f134ecb8af4fa03c13f22ee8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_push1_start_stochastic.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_left_weight = NULL; hipMalloc(&g_left_weight, XSIZE*YSIZE); int *g_right_weight = NULL; hipMalloc(&g_right_weight, XSIZE*YSIZE); int *g_down_weight = NULL; hipMalloc(&g_down_weight, XSIZE*YSIZE); int *g_up_weight = NULL; hipMalloc(&g_up_weight, XSIZE*YSIZE); int *g_sink_weight = NULL; hipMalloc(&g_sink_weight, XSIZE*YSIZE); int *g_push_reser = NULL; hipMalloc(&g_push_reser, XSIZE*YSIZE); int *g_relabel_mask = NULL; hipMalloc(&g_relabel_mask, XSIZE*YSIZE); int *g_graph_height = NULL; hipMalloc(&g_graph_height, XSIZE*YSIZE); int *g_height_write = NULL; hipMalloc(&g_height_write, XSIZE*YSIZE); int graph_size = XSIZE*YSIZE; int width = XSIZE; int rows = XSIZE; int graph_size1 = XSIZE*YSIZE; int width1 = XSIZE; int rows1 = 1; int *d_relabel = NULL; hipMalloc(&d_relabel, XSIZE*YSIZE); int *d_stochastic = NULL; hipMalloc(&d_stochastic, XSIZE*YSIZE); int *d_counter = NULL; hipMalloc(&d_counter, XSIZE*YSIZE); bool *d_finish = NULL; hipMalloc(&d_finish, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_push1_start_stochastic), dim3(gridBlock),dim3(threadBlock), 0, 0, g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_push1_start_stochastic), dim3(gridBlock),dim3(threadBlock), 0, 0, g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_push1_start_stochastic), dim3(gridBlock),dim3(threadBlock), 0, 0, g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; 
} }}
c989b080a69b92c8f134ecb8af4fa03c13f22ee8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_push1_start_stochastic.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_left_weight = NULL; cudaMalloc(&g_left_weight, XSIZE*YSIZE); int *g_right_weight = NULL; cudaMalloc(&g_right_weight, XSIZE*YSIZE); int *g_down_weight = NULL; cudaMalloc(&g_down_weight, XSIZE*YSIZE); int *g_up_weight = NULL; cudaMalloc(&g_up_weight, XSIZE*YSIZE); int *g_sink_weight = NULL; cudaMalloc(&g_sink_weight, XSIZE*YSIZE); int *g_push_reser = NULL; cudaMalloc(&g_push_reser, XSIZE*YSIZE); int *g_relabel_mask = NULL; cudaMalloc(&g_relabel_mask, XSIZE*YSIZE); int *g_graph_height = NULL; cudaMalloc(&g_graph_height, XSIZE*YSIZE); int *g_height_write = NULL; cudaMalloc(&g_height_write, XSIZE*YSIZE); int graph_size = XSIZE*YSIZE; int width = XSIZE; int rows = XSIZE; int graph_size1 = XSIZE*YSIZE; int width1 = XSIZE; int rows1 = 1; int *d_relabel = NULL; cudaMalloc(&d_relabel, XSIZE*YSIZE); int *d_stochastic = NULL; cudaMalloc(&d_stochastic, XSIZE*YSIZE); int *d_counter = NULL; cudaMalloc(&d_counter, XSIZE*YSIZE); bool *d_finish = NULL; cudaMalloc(&d_finish, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_push1_start_stochastic<<<gridBlock,threadBlock>>>(g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_push1_start_stochastic<<<gridBlock,threadBlock>>>(g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_push1_start_stochastic<<<gridBlock,threadBlock>>>(g_left_weight,g_right_weight,g_down_weight,g_up_weight,g_sink_weight,g_push_reser,g_relabel_mask,g_graph_height,g_height_write,graph_size,width,rows,graph_size1,width1,rows1,d_relabel,d_stochastic,d_counter,d_finish); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
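The benchmark above wraps the 1000-iteration launch loop in std::chrono timestamps; because kernel launches are asynchronous, the interval only bounds the launches plus whatever work happens to complete before the end timestamp. A minimal sketch of the usual wall-clock timing pattern, synchronizing the device before stopping the clock; placeholder_kernel and the sizes are hypothetical, not the push-relabel kernel from the file above.

#include <cuda_runtime.h>
#include <chrono>
#include <cstdio>

__global__ void placeholder_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main() {
  const int n = 1 << 20, iters = 1000;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));

  placeholder_kernel<<<(n + 255) / 256, 256>>>(d_x, n);   // warm-up launch
  cudaDeviceSynchronize();

  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iters; ++i)
    placeholder_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaDeviceSynchronize();          // ensure all launches finished before stopping the clock
  auto end = std::chrono::steady_clock::now();

  double usecs = std::chrono::duration<double, std::micro>(end - start).count();
  printf("avg kernel time: %f us\n", usecs / iters);
  cudaFree(d_x);
  return 0;
}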
edad37143d5e3d8d301687147b5fcde4c728fe08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "DetectionPostProcessPlugin.hpp" namespace MNN { template <typename T> __global__ void decodeBoxes_kernel(const int count, const float* boxesPtr, const CenterSizeEncoding* anchorsPtr, BoxCornerEncoding* decodeBoxesPtr, const CenterSizeEncoding& scaleValues, int numBoxes, int boxCoordNum, int anchorsCoordNum, int numAnchors1) { CUDA_KERNEL_LOOP(idx, count) { const int boxIndex = idx * boxCoordNum; CenterSizeEncoding boxCenterSize = *reinterpret_cast<const CenterSizeEncoding*>(boxesPtr + boxIndex); CenterSizeEncoding anchor = anchorsPtr[idx]; float ycenter = boxCenterSize.y / scaleValues.y * anchor.h + anchor.y; float xcenter = boxCenterSize.x / scaleValues.x * anchor.w + anchor.x; float halfh = 0.5f * static_cast<float>(exp(boxCenterSize.h / scaleValues.h)) * anchor.h; float halfw = 0.5f * static_cast<float>(exp(boxCenterSize.w / scaleValues.w)) * anchor.w; auto& curBox = decodeBoxesPtr[idx]; curBox.ymin = ycenter - halfh; curBox.xmin = xcenter - halfw; curBox.ymax = ycenter + halfh; curBox.xmax = xcenter + halfw; } } template <typename T> __global__ void maxScores_kernel(const int count, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, float* maxScores){ CUDA_KERNEL_LOOP(idx, count) { const auto boxScores = scoresStartPtr + idx * numClassWithBackground + labelOffset; int* classIndices = sortedClassIndicesPtr + idx * numClasses; // iota(classIndices, numClasses, 0); int data = 0; for(int i = 0; i < numClasses; i++){ classIndices[i] = data; data += 1; } // std::partial_sort(classIndices, classIndices + numCategoriesPerAnchor, classIndices + numClasses, // [&boxScores](const int i, const int j) { return boxScores[i] > boxScores[j]; }); int score = classIndices[0]; for(int i = 0; i < numClasses; i++){ score = max(classIndices[i], score); } maxScores[idx] = boxScores[score]; } } template <typename T> __global__ void copy_candidate(const int count, Candidate* candidatePtr, const float* score){ CUDA_KERNEL_LOOP(idx, count) { int index = 0; for(int i = 0; i < count; i++){ if(score[idx] < score[i]){ index++; } } candidatePtr[idx].index = index; candidatePtr[idx].boxIndex = idx; candidatePtr[idx].score = score[idx]; } } __device__ __forceinline__ float IOU(const float* boxes, int i, int j) { const float yMinI = min(boxes[i * 4 + 0], boxes[i * 4 + 2]); const float xMinI = min(boxes[i * 4 + 1], boxes[i * 4 + 3]); const float yMaxI = max(boxes[i * 4 + 0], boxes[i * 4 + 2]); const float xMaxI = max(boxes[i * 4 + 1], boxes[i * 4 + 3]); const float yMinJ = min(boxes[j * 4 + 0], boxes[j * 4 + 2]); const float xMinJ = min(boxes[j * 4 + 1], boxes[j * 4 + 3]); const float yMaxJ = max(boxes[j * 4 + 0], boxes[j * 4 + 2]); const float xMaxJ = max(boxes[j * 4 + 1], boxes[j * 4 + 3]); const float areaI = (yMaxI - yMinI) * (xMaxI - xMinI); const float areaJ = (yMaxJ - yMinJ) * (xMaxJ - xMinJ); if (areaI <= 0 || areaJ <= 0) return 0.0; const float intersectionYMin = max(yMinI, yMinJ); const float intersectionXMin = max(xMinI, xMinJ); const float intersectionYMax = min(yMaxI, yMaxJ); const float intersectionXMax = min(xMaxI, xMaxJ); const float intersectionArea = max(intersectionYMax - intersectionYMin, 0.0) * max(intersectionXMax - intersectionXMin, 0.0); return intersectionArea / (areaI + areaJ - intersectionArea); } template <typename T> __global__ void nms_kernel(const int count, int numBoxes, float scoreThreshold, float 
iouThreshold, Candidate* candidatePtr, int* selectedSize, float* decodedBoxesPtr, int* selectedPtr){ CUDA_KERNEL_LOOP(idx, count) { int boxIndex = 0; float originalScore = 0; for(int i = 0; i < numBoxes; i++){ if(candidatePtr[i].index == idx){ boxIndex = candidatePtr[i].boxIndex; originalScore = candidatePtr[i].score; } } if(originalScore <= scoreThreshold){ return; } bool shouldSelect = true; for (int j = (selectedSize[0] - 1); j >= 0; --j) { float iou = IOU(decodedBoxesPtr, boxIndex, selectedPtr[j]); if (iou == 0.0) { continue; } if (iou > iouThreshold) { shouldSelect = false; } } if (shouldSelect) { selectedPtr[selectedSize[0]] = boxIndex; atomicAdd(selectedSize, 1); } } } template <typename T> __global__ void set_output(const int count, const BoxCornerEncoding* decodedBoxesPtr, BoxCornerEncoding* detectionBoxesPtr, float* detectionClassesPtr, float* detectionScoresPtr, float* numDetectionsPtr, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, int* selectedPtr){ CUDA_KERNEL_LOOP(index, count) { int selectedIndex = selectedPtr[index]; const float* boxScores = scoresStartPtr + selectedIndex * numClassWithBackground + labelOffset; const int* classIndices = sortedClassIndicesPtr + selectedIndex * numClasses; for (int col = 0; col < numCategoriesPerAnchor; ++col) { int boxOffset = numCategoriesPerAnchor * numDetectionsPtr[0] + col; detectionBoxesPtr[boxOffset] = decodedBoxesPtr[selectedIndex]; detectionClassesPtr[boxOffset] = classIndices[col]; detectionScoresPtr[boxOffset] = boxScores[classIndices[col]]; atomicAdd(numDetectionsPtr, 1); } } } void DetectionPostProcessPlugin::decodeBoxes(nvinfer1::DataType dataType, const int count, const void *const * inputs, const void *const * outputs, const void * scaleValues, void * decodeBoxes, int numBoxes, int boxCoordNum, int anchorsCoordNum, int numAnchors1) { auto boxesEncoding = inputs[0]; auto anchors = inputs[2]; if (dataType == nvinfer1::DataType::kFLOAT){ returnhipLaunchKernelGGL(( decodeBoxes_kernel<float>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, (float*)boxesEncoding, reinterpret_cast<const CenterSizeEncoding*>(anchors), reinterpret_cast<BoxCornerEncoding*>(decodeBoxes), *reinterpret_cast<const CenterSizeEncoding*>(scaleValues), numBoxes, boxCoordNum, anchorsCoordNum, numAnchors1); }else{ returnhipLaunchKernelGGL(( decodeBoxes_kernel<__half>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, (float*)boxesEncoding, reinterpret_cast<const CenterSizeEncoding*>(anchors), reinterpret_cast<BoxCornerEncoding*>(decodeBoxes), *reinterpret_cast<const CenterSizeEncoding*>(scaleValues), numBoxes, boxCoordNum, anchorsCoordNum, numAnchors1); } } void DetectionPostProcessPlugin::maxScores(nvinfer1::DataType dataType, const int count, const void *const * inputs, const void *const * outputs, int numClassWithBackground, int* sortedClassIndicesPtr, int numClasses, float* maxScores, int maxClassesPerAnchor) { auto classPredictions = inputs[1]; const int labelOffset = numClassWithBackground - numClasses; int numCategoriesPerAnchor = ::min(maxClassesPerAnchor, numClasses); hipLaunchKernelGGL(( maxScores_kernel<float>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, (const float*)classPredictions, numClassWithBackground, labelOffset, sortedClassIndicesPtr, numClasses, numCategoriesPerAnchor, maxScores); } void DetectionPostProcessPlugin::NMSSingleClasss(float* decodedBoxesPtr, const float* scoresPtr, 
int maxDetections, float iouThreshold, float scoreThreshold, int* selectedPtr, int* selectedSize, int numBoxes, int outputNum, Candidate* candidate, Candidate* mCandidatePriorityQueue){ hipLaunchKernelGGL(( copy_candidate<float>), dim3(CAFFE_GET_BLOCKS(numBoxes)), dim3(CUDA_NUM_THREADS), 0, 0, numBoxes, candidate, scoresPtr); hipLaunchKernelGGL(( nms_kernel<float>), dim3(CAFFE_GET_BLOCKS(outputNum)), dim3(CUDA_NUM_THREADS), 0, 0, outputNum, numBoxes, scoreThreshold, iouThreshold, candidate, selectedSize, decodedBoxesPtr, selectedPtr); } void DetectionPostProcessPlugin::setOutput(const int selectSize, const BoxCornerEncoding* decodedBoxesPtr, BoxCornerEncoding* detectionBoxesPtr, float* detectionClassesPtr, float* detectionScoresPtr, float* numDetectionsPtr, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, int* selectedPtr){ hipLaunchKernelGGL(( set_output<float>), dim3(CAFFE_GET_BLOCKS(selectSize)), dim3(CUDA_NUM_THREADS), 0, 0, selectSize, decodedBoxesPtr, detectionBoxesPtr, detectionClassesPtr, detectionScoresPtr, numDetectionsPtr, scoresStartPtr, numClassWithBackground, labelOffset, sortedClassIndicesPtr, numClasses, numCategoriesPerAnchor, selectedPtr); } }; // namespace MNN
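// --- Reference sketch, not part of the original plugin source ---
// The anchor-relative decode applied by decodeBoxes_kernel above, written as a
// plain host-side helper for a single box (useful for checking results on the CPU).
// It assumes the CenterSizeEncoding / BoxCornerEncoding structs from
// DetectionPostProcessPlugin.hpp are visible in this scope; the helper name
// decodeOneBoxReference is hypothetical.
inline BoxCornerEncoding decodeOneBoxReference(const CenterSizeEncoding& box,
                                               const CenterSizeEncoding& anchor,
                                               const CenterSizeEncoding& scale) {
    BoxCornerEncoding out;
    // Scale the predicted offsets, shift by the anchor center, and recover the
    // box size through the exponential, exactly as in the device kernel.
    const float ycenter = box.y / scale.y * anchor.h + anchor.y;
    const float xcenter = box.x / scale.x * anchor.w + anchor.x;
    const float halfh = 0.5f * expf(box.h / scale.h) * anchor.h;
    const float halfw = 0.5f * expf(box.w / scale.w) * anchor.w;
    out.ymin = ycenter - halfh;
    out.xmin = xcenter - halfw;
    out.ymax = ycenter + halfh;
    out.xmax = xcenter + halfw;
    return out;
}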
edad37143d5e3d8d301687147b5fcde4c728fe08.cu
#include "DetectionPostProcessPlugin.hpp" namespace MNN { template <typename T> __global__ void decodeBoxes_kernel(const int count, const float* boxesPtr, const CenterSizeEncoding* anchorsPtr, BoxCornerEncoding* decodeBoxesPtr, const CenterSizeEncoding& scaleValues, int numBoxes, int boxCoordNum, int anchorsCoordNum, int numAnchors1) { CUDA_KERNEL_LOOP(idx, count) { const int boxIndex = idx * boxCoordNum; CenterSizeEncoding boxCenterSize = *reinterpret_cast<const CenterSizeEncoding*>(boxesPtr + boxIndex); CenterSizeEncoding anchor = anchorsPtr[idx]; float ycenter = boxCenterSize.y / scaleValues.y * anchor.h + anchor.y; float xcenter = boxCenterSize.x / scaleValues.x * anchor.w + anchor.x; float halfh = 0.5f * static_cast<float>(exp(boxCenterSize.h / scaleValues.h)) * anchor.h; float halfw = 0.5f * static_cast<float>(exp(boxCenterSize.w / scaleValues.w)) * anchor.w; auto& curBox = decodeBoxesPtr[idx]; curBox.ymin = ycenter - halfh; curBox.xmin = xcenter - halfw; curBox.ymax = ycenter + halfh; curBox.xmax = xcenter + halfw; } } template <typename T> __global__ void maxScores_kernel(const int count, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, float* maxScores){ CUDA_KERNEL_LOOP(idx, count) { const auto boxScores = scoresStartPtr + idx * numClassWithBackground + labelOffset; int* classIndices = sortedClassIndicesPtr + idx * numClasses; // iota(classIndices, numClasses, 0); int data = 0; for(int i = 0; i < numClasses; i++){ classIndices[i] = data; data += 1; } // std::partial_sort(classIndices, classIndices + numCategoriesPerAnchor, classIndices + numClasses, // [&boxScores](const int i, const int j) { return boxScores[i] > boxScores[j]; }); int score = classIndices[0]; for(int i = 0; i < numClasses; i++){ score = max(classIndices[i], score); } maxScores[idx] = boxScores[score]; } } template <typename T> __global__ void copy_candidate(const int count, Candidate* candidatePtr, const float* score){ CUDA_KERNEL_LOOP(idx, count) { int index = 0; for(int i = 0; i < count; i++){ if(score[idx] < score[i]){ index++; } } candidatePtr[idx].index = index; candidatePtr[idx].boxIndex = idx; candidatePtr[idx].score = score[idx]; } } __device__ __forceinline__ float IOU(const float* boxes, int i, int j) { const float yMinI = min(boxes[i * 4 + 0], boxes[i * 4 + 2]); const float xMinI = min(boxes[i * 4 + 1], boxes[i * 4 + 3]); const float yMaxI = max(boxes[i * 4 + 0], boxes[i * 4 + 2]); const float xMaxI = max(boxes[i * 4 + 1], boxes[i * 4 + 3]); const float yMinJ = min(boxes[j * 4 + 0], boxes[j * 4 + 2]); const float xMinJ = min(boxes[j * 4 + 1], boxes[j * 4 + 3]); const float yMaxJ = max(boxes[j * 4 + 0], boxes[j * 4 + 2]); const float xMaxJ = max(boxes[j * 4 + 1], boxes[j * 4 + 3]); const float areaI = (yMaxI - yMinI) * (xMaxI - xMinI); const float areaJ = (yMaxJ - yMinJ) * (xMaxJ - xMinJ); if (areaI <= 0 || areaJ <= 0) return 0.0; const float intersectionYMin = max(yMinI, yMinJ); const float intersectionXMin = max(xMinI, xMinJ); const float intersectionYMax = min(yMaxI, yMaxJ); const float intersectionXMax = min(xMaxI, xMaxJ); const float intersectionArea = max(intersectionYMax - intersectionYMin, 0.0) * max(intersectionXMax - intersectionXMin, 0.0); return intersectionArea / (areaI + areaJ - intersectionArea); } template <typename T> __global__ void nms_kernel(const int count, int numBoxes, float scoreThreshold, float iouThreshold, Candidate* candidatePtr, int* selectedSize, float* decodedBoxesPtr, int* 
selectedPtr){ CUDA_KERNEL_LOOP(idx, count) { int boxIndex = 0; float originalScore = 0; for(int i = 0; i < numBoxes; i++){ if(candidatePtr[i].index == idx){ boxIndex = candidatePtr[i].boxIndex; originalScore = candidatePtr[i].score; } } if(originalScore <= scoreThreshold){ return; } bool shouldSelect = true; for (int j = (selectedSize[0] - 1); j >= 0; --j) { float iou = IOU(decodedBoxesPtr, boxIndex, selectedPtr[j]); if (iou == 0.0) { continue; } if (iou > iouThreshold) { shouldSelect = false; } } if (shouldSelect) { selectedPtr[selectedSize[0]] = boxIndex; atomicAdd(selectedSize, 1); } } } template <typename T> __global__ void set_output(const int count, const BoxCornerEncoding* decodedBoxesPtr, BoxCornerEncoding* detectionBoxesPtr, float* detectionClassesPtr, float* detectionScoresPtr, float* numDetectionsPtr, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, int* selectedPtr){ CUDA_KERNEL_LOOP(index, count) { int selectedIndex = selectedPtr[index]; const float* boxScores = scoresStartPtr + selectedIndex * numClassWithBackground + labelOffset; const int* classIndices = sortedClassIndicesPtr + selectedIndex * numClasses; for (int col = 0; col < numCategoriesPerAnchor; ++col) { int boxOffset = numCategoriesPerAnchor * numDetectionsPtr[0] + col; detectionBoxesPtr[boxOffset] = decodedBoxesPtr[selectedIndex]; detectionClassesPtr[boxOffset] = classIndices[col]; detectionScoresPtr[boxOffset] = boxScores[classIndices[col]]; atomicAdd(numDetectionsPtr, 1); } } } void DetectionPostProcessPlugin::decodeBoxes(nvinfer1::DataType dataType, const int count, const void *const * inputs, const void *const * outputs, const void * scaleValues, void * decodeBoxes, int numBoxes, int boxCoordNum, int anchorsCoordNum, int numAnchors1) { auto boxesEncoding = inputs[0]; auto anchors = inputs[2]; if (dataType == nvinfer1::DataType::kFLOAT){ return decodeBoxes_kernel<float><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, (float*)boxesEncoding, reinterpret_cast<const CenterSizeEncoding*>(anchors), reinterpret_cast<BoxCornerEncoding*>(decodeBoxes), *reinterpret_cast<const CenterSizeEncoding*>(scaleValues), numBoxes, boxCoordNum, anchorsCoordNum, numAnchors1); }else{ return decodeBoxes_kernel<__half><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, (float*)boxesEncoding, reinterpret_cast<const CenterSizeEncoding*>(anchors), reinterpret_cast<BoxCornerEncoding*>(decodeBoxes), *reinterpret_cast<const CenterSizeEncoding*>(scaleValues), numBoxes, boxCoordNum, anchorsCoordNum, numAnchors1); } } void DetectionPostProcessPlugin::maxScores(nvinfer1::DataType dataType, const int count, const void *const * inputs, const void *const * outputs, int numClassWithBackground, int* sortedClassIndicesPtr, int numClasses, float* maxScores, int maxClassesPerAnchor) { auto classPredictions = inputs[1]; const int labelOffset = numClassWithBackground - numClasses; int numCategoriesPerAnchor = std::min(maxClassesPerAnchor, numClasses); maxScores_kernel<float><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, (const float*)classPredictions, numClassWithBackground, labelOffset, sortedClassIndicesPtr, numClasses, numCategoriesPerAnchor, maxScores); } void DetectionPostProcessPlugin::NMSSingleClasss(float* decodedBoxesPtr, const float* scoresPtr, int maxDetections, float iouThreshold, float scoreThreshold, int* selectedPtr, int* selectedSize, int numBoxes, int outputNum, Candidate* candidate, Candidate* mCandidatePriorityQueue){ 
copy_candidate<float><<<CAFFE_GET_BLOCKS(numBoxes), CUDA_NUM_THREADS>>>(numBoxes, candidate, scoresPtr); nms_kernel<float><<<CAFFE_GET_BLOCKS(outputNum), CUDA_NUM_THREADS>>>(outputNum, numBoxes, scoreThreshold, iouThreshold, candidate, selectedSize, decodedBoxesPtr, selectedPtr); } void DetectionPostProcessPlugin::setOutput(const int selectSize, const BoxCornerEncoding* decodedBoxesPtr, BoxCornerEncoding* detectionBoxesPtr, float* detectionClassesPtr, float* detectionScoresPtr, float* numDetectionsPtr, const float* scoresStartPtr, int numClassWithBackground, int labelOffset, int* sortedClassIndicesPtr, int numClasses, int numCategoriesPerAnchor, int* selectedPtr){ set_output<float><<<CAFFE_GET_BLOCKS(selectSize), CUDA_NUM_THREADS>>>(selectSize, decodedBoxesPtr, detectionBoxesPtr, detectionClassesPtr, detectionScoresPtr, numDetectionsPtr, scoresStartPtr, numClassWithBackground, labelOffset, sortedClassIndicesPtr, numClasses, numCategoriesPerAnchor, selectedPtr); } }; // namespace MNN
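// --- Reference sketch, not part of the original plugin source ---
// maxScores_kernel above carries the commented-out std::partial_sort from the
// CPU path; a device-side equivalent would order the first numCategoriesPerAnchor
// entries of classIndices by descending score so that set_output can read the top
// classes per anchor. The helper name partialSortClassesByScore is hypothetical.
__device__ inline void partialSortClassesByScore(int* classIndices, const float* boxScores,
                                                 int numClasses, int numCategoriesPerAnchor) {
    // Simple selection sort over the leading slots: after step i,
    // classIndices[0..i] hold the i+1 highest-scoring classes.
    for (int i = 0; i < numCategoriesPerAnchor && i < numClasses; ++i) {
        int best = i;
        for (int j = i + 1; j < numClasses; ++j) {
            if (boxScores[classIndices[j]] > boxScores[classIndices[best]]) {
                best = j;
            }
        }
        const int tmp = classIndices[i];
        classIndices[i] = classIndices[best];
        classIndices[best] = tmp;
    }
}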
0c2aff43088ae97c6d44353ef1e2501f89e0bc05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Available optimizations (value should be used as the first parameter in the command line): 0 - Base -> no optimization 1 - Sham -> shared memory 2 - ZintReg -> for iteration on Z axis (Paulius) 3 - Zint -> for iteration on Z axis without using registers 4 - ShamZintReg -> shared memory + for iteration on Z axis 5 - ShamZint -> shared memory + for iteration on Z axis without registers 6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking 7 - Roc -> use of read only cache (__restrict__ and const modifiers) 8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers) 9 - RocZintReg -> for iteration on Z axis + read only cache 10 - RocZint -> for iteration on Z axis without registers + read only cache 11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking Known limitations: data grid size must be multiple of BLOCK_SIZE */ #include <stdio.h> //#define PRINT_GOLD //#define PRINT_RESULT #define BLOCK_DIMX 32 #define BLOCK_DIMY 16 #define BLOCK_DIMZ 1 #define RADIUS 5 // Half of the order #define PADDING_SIZE 32 // Error checking function #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ printf("ERROR: Failed to run stmt %s\n", #stmt); \ printf("ERROR: Got CUDA error ... %s\n", hipGetErrorString(err)); \ return -1; \ } \ } while(0) __constant__ float coeff[RADIUS*6+1]; /* Optimization Base: baseline code (no optimization) */ __global__ void calcStencilBase(float *a, float *b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * a[index] + coeff[1] * a[index - 5] + coeff[2] * a[index - 4] + coeff[3] * a[index - 3] + coeff[4] * a[index - 2] + coeff[5] * a[index - 1] + coeff[6] * a[index + 1] + coeff[7] * a[index + 2] + coeff[8] * a[index + 3] + coeff[9] * a[index + 4] + coeff[10] * a[index + 5] + coeff[11] * a[index - 5*pitchedDimx] + coeff[12] * a[index - 4*pitchedDimx] + coeff[13] * a[index - 3*pitchedDimx] + coeff[14] * a[index - 2*pitchedDimx] + coeff[15] * a[index - pitchedDimx] + coeff[16] * a[index + pitchedDimx] + coeff[17] * a[index + 2*pitchedDimx] + coeff[18] * a[index + 3*pitchedDimx] + coeff[19] * a[index + 4*pitchedDimx] + coeff[20] * a[index + 5*pitchedDimx] + coeff[21] * a[index - 5*stride] + coeff[22] * a[index - 4*stride] + coeff[23] * a[index - 3*stride] + coeff[24] * a[index - 2*stride] + coeff[25] * a[index - stride] + coeff[26] * a[index + stride] + coeff[27] * a[index + 2*stride] + coeff[28] * a[index + 3*stride] + coeff[29] * a[index + 4*stride] + coeff[30] * a[index + 5*stride]; } /* Optimization Sham: shared memory */ __global__ void calcStencilSham(float *a, float *b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = 
pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[index - (RADIUS*pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[index + (BLOCK_DIMY*pitchedDimx)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[index]; __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * a[index - 5*stride] + coeff[22] * a[index - 4*stride] + coeff[23] * a[index - 3*stride] + coeff[24] * a[index - 2*stride] + coeff[25] * a[index - stride] + coeff[26] * a[index + stride] + coeff[27] * a[index + 2*stride] + coeff[28] * a[index + 3*stride] + coeff[29] * a[index + 4*stride] + coeff[30] * a[index + 5*stride]; } /* Optimization ZintReg: for iteration on Z axis with registers */ __global__ void calcStencilZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind5 will be loaded inside the next 'for') behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = a[in_index]; in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * a[out_index - 5] + coeff[2] * a[out_index - 
4] + coeff[3] * a[out_index - 3] + coeff[4] * a[out_index - 2] + coeff[5] * a[out_index - 1] + coeff[6] * a[out_index + 1] + coeff[7] * a[out_index + 2] + coeff[8] * a[out_index + 3] + coeff[9] * a[out_index + 4] + coeff[10] * a[out_index + 5] + coeff[11] * a[out_index - 5*pitchedDimx] + coeff[12] * a[out_index - 4*pitchedDimx] + coeff[13] * a[out_index - 3*pitchedDimx] + coeff[14] * a[out_index - 2*pitchedDimx] + coeff[15] * a[out_index - pitchedDimx] + coeff[16] * a[out_index + pitchedDimx] + coeff[17] * a[out_index + 2*pitchedDimx] + coeff[18] * a[out_index + 3*pitchedDimx] + coeff[19] * a[out_index + 4*pitchedDimx] + coeff[20] * a[out_index + 5*pitchedDimx] + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization Zint: for iteration on Z axis without using registers */ __global__ void calcStencilZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * a[out_index] + coeff[1] * a[out_index - 5] + coeff[2] * a[out_index - 4] + coeff[3] * a[out_index - 3] + coeff[4] * a[out_index - 2] + coeff[5] * a[out_index - 1] + coeff[6] * a[out_index + 1] + coeff[7] * a[out_index + 2] + coeff[8] * a[out_index + 3] + coeff[9] * a[out_index + 4] + coeff[10] * a[out_index + 5] + coeff[11] * a[out_index - 5*pitchedDimx] + coeff[12] * a[out_index - 4*pitchedDimx] + coeff[13] * a[out_index - 3*pitchedDimx] + coeff[14] * a[out_index - 2*pitchedDimx] + coeff[15] * a[out_index - pitchedDimx] + coeff[16] * a[out_index + pitchedDimx] + coeff[17] * a[out_index + 2*pitchedDimx] + coeff[18] * a[out_index + 3*pitchedDimx] + coeff[19] * a[out_index + 4*pitchedDimx] + coeff[20] * a[out_index + 5*pitchedDimx] + coeff[21] * a[out_index - 5*stride] + coeff[22] * a[out_index - 4*stride] + coeff[23] * a[out_index - 3*stride] + coeff[24] * a[out_index - 2*stride] + coeff[25] * a[out_index - stride] + coeff[26] * a[out_index + stride] + coeff[27] * a[out_index + 2*stride] + coeff[28] * a[out_index + 3*stride] + coeff[29] * a[out_index + 4*stride] + coeff[30] * a[out_index + 5*stride]; } } /* Optimization ShamZintReg: for iteration on Z axis + use of shared memory */ __global__ void calcStencilShamZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current 
slice register float current; // Input value in the current slice // Load initial values (behind5 will be loaded inside the next 'for') behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = a[in_index]; in_index += stride; out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = current; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization ShamZint: for iteration on Z axis without registers + use of shared memory */ __global__ void calcStencilShamZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { 
ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[out_index]; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * a[out_index - 5*stride] + coeff[22] * a[out_index - 4*stride] + coeff[23] * a[out_index - 3*stride] + coeff[24] * a[out_index - 2*stride] + coeff[25] * a[out_index - stride] + coeff[26] * a[out_index + stride] + coeff[27] * a[out_index + 2*stride] + coeff[28] * a[out_index + 3*stride] + coeff[29] * a[out_index + 4*stride] + coeff[30] * a[out_index + 5*stride]; } } /* Optimization ShamZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamZintTempReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS][2]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to 
store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind5 = a[in_index]; // Z = -R = -5 in_index += stride; t0_behind4 = a[in_index]; // Z = -R+1 = -4 in_index += stride; t0_behind3 = a[in_index]; // Z = -R+2 = -3 in_index += stride; t0_behind2 = a[in_index]; // Z = -R+3 = -2 in_index += stride; t0_behind1 = a[in_index]; // Z = -R+4 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = a[in_index]; // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = a[in_index]; // Z = 1 in_index += stride; t0_infront2 = a[in_index]; // Z = 2 in_index += stride; t0_infront3 = a[in_index]; // Z = 3 in_index += stride; t0_infront4 = a[in_index]; // Z = 4 in_index += stride; t0_infront5 = a[in_index]; // Z = R = 5 in_index += stride; // Load Z = 0 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[out_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = 
t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; __syncthreads(); t0_behind5 = t0_behind4; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; // Load Z = 1 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront1 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 2 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * 
ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront2 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 3 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront3 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 4 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * 
BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront4 = t0_current; } __syncthreads(); for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = R+i to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * 
t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront5 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[ty][sharedTx][0] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[ty][sharedTx - 5][0] + coeff[2] * ds_a[ty][sharedTx - 4][0] + coeff[3] * ds_a[ty][sharedTx - 3][0] + coeff[4] * ds_a[ty][sharedTx - 2][0] + coeff[5] * ds_a[ty][sharedTx - 1][0] + coeff[6] * ds_a[ty][sharedTx + 1][0] + coeff[7] * ds_a[ty][sharedTx + 2][0] + coeff[8] * ds_a[ty][sharedTx + 3][0] + coeff[9] * ds_a[ty][sharedTx + 4][0] + coeff[10] * ds_a[ty][sharedTx + 5][0] + coeff[11] * ds_a[ty - 5][sharedTx][0] + coeff[12] * ds_a[ty - 4][sharedTx][0] + coeff[13] * ds_a[ty - 3][sharedTx][0] + coeff[14] * ds_a[ty - 2][sharedTx][0] + coeff[15] * ds_a[ty - 1][sharedTx][0] + coeff[16] * ds_a[ty + 1][sharedTx][0] + coeff[17] * ds_a[ty + 2][sharedTx][0] + coeff[18] * ds_a[ty + 3][sharedTx][0] + coeff[19] * ds_a[ty + 4][sharedTx][0] + coeff[20] * ds_a[ty + 5][sharedTx][0] + coeff[21] * t1_behind5 + coeff[22] * t1_behind4 + coeff[23] * t1_behind3 + coeff[24] * t1_behind2 + coeff[25] * t1_behind1 + coeff[26] * t1_infront1 + coeff[27] * t1_infront2 + coeff[28] * t1_infront3 + coeff[29] * t1_infront4 + coeff[30] * t1_infront5; } out_index += stride; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; } } /* Optimization Roc: use of read only cache (texture memory) */ __global__ void calcStencilRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * __ldg(&a[index]) + coeff[1] * __ldg(&a[index - 5]) + coeff[2] * __ldg(&a[index - 4]) + coeff[3] * __ldg(&a[index - 3]) + coeff[4] * __ldg(&a[index - 2]) + coeff[5] * __ldg(&a[index - 1]) + coeff[6] * __ldg(&a[index + 1]) + coeff[7] * __ldg(&a[index + 2]) + coeff[8] * __ldg(&a[index + 3]) + coeff[9] * __ldg(&a[index + 4]) + coeff[10] * __ldg(&a[index + 5]) + coeff[11] * __ldg(&a[index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[index - pitchedDimx]) + coeff[16] * __ldg(&a[index + pitchedDimx]) + coeff[17] * __ldg(&a[index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[index + 5*pitchedDimx]) + coeff[21] * __ldg(&a[index - 5*stride]) + coeff[22] * __ldg(&a[index - 4*stride]) + coeff[23] * __ldg(&a[index - 3*stride]) + coeff[24] * __ldg(&a[index - 2*stride]) + coeff[25] * __ldg(&a[index - stride]) + coeff[26] * __ldg(&a[index + stride]) + coeff[27] * 
__ldg(&a[index + 2*stride]) + coeff[28] * __ldg(&a[index + 3*stride]) + coeff[29] * __ldg(&a[index + 4*stride]) + coeff[30] * __ldg(&a[index + 5*stride]); } /* Optimization ShamRoc: use of shared memory + read only cache (texture memory) */ __global__ void calcStencilShamRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = __ldg(&a[index - (RADIUS*pitchedDimx)]); ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = __ldg(&a[index + (BLOCK_DIMY*pitchedDimx)]); } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = __ldg(&a[index - RADIUS]); ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = __ldg(&a[index + BLOCK_DIMX]); } // Load current position to shared memory ds_a[ty][sharedTx] = __ldg(&a[index]); __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * __ldg(&a[index - 5*stride]) + coeff[22] * __ldg(&a[index - 4*stride]) + coeff[23] * __ldg(&a[index - 3*stride]) + coeff[24] * __ldg(&a[index - 2*stride]) + coeff[25] * __ldg(&a[index - stride]) + coeff[26] * __ldg(&a[index + stride]) + coeff[27] * __ldg(&a[index + 2*stride]) + coeff[28] * __ldg(&a[index + 3*stride]) + coeff[29] * __ldg(&a[index + 4*stride]) + coeff[30] * __ldg(&a[index + 5*stride]); } /* Optimization RocZintReg: use of iteration on Z axis + read only cache (texture memory) */ __global__ void calcStencilRocZintReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind5 will be loaded 
inside the next 'for') behind4 = __ldg(&a[in_index]); in_index += stride; behind3 = __ldg(&a[in_index]); in_index += stride; behind2 = __ldg(&a[in_index]); in_index += stride; behind1 = __ldg(&a[in_index]); in_index += stride; current = __ldg(&a[in_index]); out_index = in_index; in_index += stride; infront1 = __ldg(&a[in_index]); in_index += stride; infront2 = __ldg(&a[in_index]); in_index += stride; infront3 = __ldg(&a[in_index]); in_index += stride; infront4 = __ldg(&a[in_index]); in_index += stride; infront5 = __ldg(&a[in_index]); in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = __ldg(&a[in_index]); in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization RocZint: use of iteration on Z axis without registers + read only cache (texture memory) */ __global__ void calcStencilRocZint(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for reading Z values out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * __ldg(&a[out_index]) + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 
3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * __ldg(&a[out_index - 5*stride]) + coeff[22] * __ldg(&a[out_index - 4*stride]) + coeff[23] * __ldg(&a[out_index - 3*stride]) + coeff[24] * __ldg(&a[out_index - 2*stride]) + coeff[25] * __ldg(&a[out_index - stride]) + coeff[26] * __ldg(&a[out_index + stride]) + coeff[27] * __ldg(&a[out_index + 2*stride]) + coeff[28] * __ldg(&a[out_index + 3*stride]) + coeff[29] * __ldg(&a[out_index + 4*stride]) + coeff[30] * __ldg(&a[out_index + 5*stride]); } } /* Optimization ShamRocZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamRocZintTempReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY][BLOCK_DIMX]; int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; 
t0_behind5 = __ldg(&a[in_index]); // Z = -R = -5 in_index += stride; t0_behind4 = __ldg(&a[in_index]); // Z = -R+1 = -4 in_index += stride; t0_behind3 = __ldg(&a[in_index]); // Z = -R+2 = -3 in_index += stride; t0_behind2 = __ldg(&a[in_index]); // Z = -R+3 = -2 in_index += stride; t0_behind1 = __ldg(&a[in_index]); // Z = -R+4 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = __ldg(&a[in_index]); // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = __ldg(&a[in_index]); // Z = 1 in_index += stride; t0_infront2 = __ldg(&a[in_index]); // Z = 2 in_index += stride; t0_infront3 = __ldg(&a[in_index]); // Z = 3 in_index += stride; t0_infront4 = __ldg(&a[in_index]); // Z = 4 in_index += stride; t0_infront5 = __ldg(&a[in_index]); // Z = R = 5 in_index += stride; // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * 
__ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront1 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront2 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * 
__ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront3 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront4 = t0_current; } for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + 
coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront5 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[threadIdx.y][threadIdx.x] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[threadIdx.y][threadIdx.x - 5] + coeff[2] * ds_a[threadIdx.y][threadIdx.x - 4] + coeff[3] * ds_a[threadIdx.y][threadIdx.x - 3] + coeff[4] * ds_a[threadIdx.y][threadIdx.x - 2] + coeff[5] * ds_a[threadIdx.y][threadIdx.x - 1] + coeff[6] * ds_a[threadIdx.y][threadIdx.x + 1] + coeff[7] * ds_a[threadIdx.y][threadIdx.x + 2] + coeff[8] * ds_a[threadIdx.y][threadIdx.x + 3] + coeff[9] * ds_a[threadIdx.y][threadIdx.x + 4] + coeff[10] * ds_a[threadIdx.y][threadIdx.x + 5] + coeff[11] * ds_a[threadIdx.y - 5][threadIdx.x] + coeff[12] * ds_a[threadIdx.y - 4][threadIdx.x] + coeff[13] * ds_a[threadIdx.y - 3][threadIdx.x] + coeff[14] * ds_a[threadIdx.y - 2][threadIdx.x] + coeff[15] * ds_a[threadIdx.y - 1][threadIdx.x] + coeff[16] * ds_a[threadIdx.y + 1][threadIdx.x] + coeff[17] * ds_a[threadIdx.y + 2][threadIdx.x] + coeff[18] * ds_a[threadIdx.y + 3][threadIdx.x] + coeff[19] * ds_a[threadIdx.y + 4][threadIdx.x] + coeff[20] * ds_a[threadIdx.y + 5][threadIdx.x] + coeff[21] * t1_behind5 + coeff[22] * t1_behind4 + coeff[23] * t1_behind3 + coeff[24] * t1_behind2 + coeff[25] * t1_behind1 + coeff[26] * t1_infront1 + coeff[27] * t1_infront2 + coeff[28] * t1_infront3 + coeff[29] * t1_infront4 + coeff[30] * t1_infront5; } out_index += stride; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; } } void initGold(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+2*RADIUS); int index = 0; for (int i = 0; i < (dimz+2*RADIUS); i++) { for (int j = 0; j < (dimy+2*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (i<RADIUS || j<RADIUS || i>=dimz+RADIUS || j>=dimy+RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void initGoldTemporal(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+4*RADIUS); int index = 0; for (int i = 0; i < (dimz+4*RADIUS); i++) { for (int j = 0; j < (dimy+4*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if ( i<2*RADIUS || j<2*RADIUS || i>=dimz+2*RADIUS || j>=dimy+2*RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE ) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void hostStencil(float *a, int t_end, int dimx, int 
dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+2*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGold(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 5] + hcoeff[2] * b[index - 4] + hcoeff[3] * b[index - 3] + hcoeff[4] * b[index - 2] + hcoeff[5] * b[index - 1] + hcoeff[6] * b[index + 1] + hcoeff[7] * b[index + 2] + hcoeff[8] * b[index + 3] + hcoeff[9] * b[index + 4] + hcoeff[10] * b[index + 5] + hcoeff[11] * b[index - 5*pitchedDimx] + hcoeff[12] * b[index - 4*pitchedDimx] + hcoeff[13] * b[index - 3*pitchedDimx] + hcoeff[14] * b[index - 2*pitchedDimx] + hcoeff[15] * b[index - pitchedDimx] + hcoeff[16] * b[index + pitchedDimx] + hcoeff[17] * b[index + 2*pitchedDimx] + hcoeff[18] * b[index + 3*pitchedDimx] + hcoeff[19] * b[index + 4*pitchedDimx] + hcoeff[20] * b[index + 5*pitchedDimx] + hcoeff[21] * b[index - 5*stride] + hcoeff[22] * b[index - 4*stride] + hcoeff[23] * b[index - 3*stride] + hcoeff[24] * b[index - 2*stride] + hcoeff[25] * b[index - stride] + hcoeff[26] * b[index + stride] + hcoeff[27] * b[index + 2*stride] + hcoeff[28] * b[index + 3*stride] + hcoeff[29] * b[index + 4*stride] + hcoeff[30] * b[index + 5*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 5] + hcoeff[2] * a[index - 4] + hcoeff[3] * a[index - 3] + hcoeff[4] * a[index - 2] + hcoeff[5] * a[index - 1] + hcoeff[6] * a[index + 1] + hcoeff[7] * a[index + 2] + hcoeff[8] * a[index + 3] + hcoeff[9] * a[index + 4] + hcoeff[10] * a[index + 5] + hcoeff[11] * a[index - 5*pitchedDimx] + hcoeff[12] * a[index - 4*pitchedDimx] + hcoeff[13] * a[index - 3*pitchedDimx] + hcoeff[14] * a[index - 2*pitchedDimx] + hcoeff[15] * a[index - pitchedDimx] + hcoeff[16] * a[index + pitchedDimx] + hcoeff[17] * a[index + 2*pitchedDimx] + hcoeff[18] * a[index + 3*pitchedDimx] + hcoeff[19] * a[index + 4*pitchedDimx] + hcoeff[20] * a[index + 5*pitchedDimx] + hcoeff[21] * a[index - 5*stride] + hcoeff[22] * a[index - 4*stride] + hcoeff[23] * a[index - 3*stride] + hcoeff[24] * a[index - 2*stride] + hcoeff[25] * a[index - stride] + hcoeff[26] * a[index + stride] + hcoeff[27] * a[index + 2*stride] + hcoeff[28] * a[index + 3*stride] + hcoeff[29] * a[index + 4*stride] + hcoeff[30] * a[index + 5*stride]; } } } } } if (t_end%2) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void hostStencilTemporal(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+4*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGoldTemporal(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 5] + hcoeff[2] * b[index - 4] + hcoeff[3] * b[index - 3] + hcoeff[4] * b[index - 2] + hcoeff[5] * b[index - 1] + hcoeff[6] * b[index + 1] + hcoeff[7] * 
b[index + 2] + hcoeff[8] * b[index + 3] + hcoeff[9] * b[index + 4] + hcoeff[10] * b[index + 5] + hcoeff[11] * b[index - 5*pitchedDimx] + hcoeff[12] * b[index - 4*pitchedDimx] + hcoeff[13] * b[index - 3*pitchedDimx] + hcoeff[14] * b[index - 2*pitchedDimx] + hcoeff[15] * b[index - pitchedDimx] + hcoeff[16] * b[index + pitchedDimx] + hcoeff[17] * b[index + 2*pitchedDimx] + hcoeff[18] * b[index + 3*pitchedDimx] + hcoeff[19] * b[index + 4*pitchedDimx] + hcoeff[20] * b[index + 5*pitchedDimx] + hcoeff[21] * b[index - 5*stride] + hcoeff[22] * b[index - 4*stride] + hcoeff[23] * b[index - 3*stride] + hcoeff[24] * b[index - 2*stride] + hcoeff[25] * b[index - stride] + hcoeff[26] * b[index + stride] + hcoeff[27] * b[index + 2*stride] + hcoeff[28] * b[index + 3*stride] + hcoeff[29] * b[index + 4*stride] + hcoeff[30] * b[index + 5*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 5] + hcoeff[2] * a[index - 4] + hcoeff[3] * a[index - 3] + hcoeff[4] * a[index - 2] + hcoeff[5] * a[index - 1] + hcoeff[6] * a[index + 1] + hcoeff[7] * a[index + 2] + hcoeff[8] * a[index + 3] + hcoeff[9] * a[index + 4] + hcoeff[10] * a[index + 5] + hcoeff[11] * a[index - 5*pitchedDimx] + hcoeff[12] * a[index - 4*pitchedDimx] + hcoeff[13] * a[index - 3*pitchedDimx] + hcoeff[14] * a[index - 2*pitchedDimx] + hcoeff[15] * a[index - pitchedDimx] + hcoeff[16] * a[index + pitchedDimx] + hcoeff[17] * a[index + 2*pitchedDimx] + hcoeff[18] * a[index + 3*pitchedDimx] + hcoeff[19] * a[index + 4*pitchedDimx] + hcoeff[20] * a[index + 5*pitchedDimx] + hcoeff[21] * a[index - 5*stride] + hcoeff[22] * a[index - 4*stride] + hcoeff[23] * a[index - 3*stride] + hcoeff[24] * a[index - 2*stride] + hcoeff[25] * a[index - stride] + hcoeff[26] * a[index + stride] + hcoeff[27] * a[index + 2*stride] + hcoeff[28] * a[index + 3*stride] + hcoeff[29] * a[index + 4*stride] + hcoeff[30] * a[index + 5*stride]; } } } } } if (t_end%2) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void printMatrix(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i=0; i < dimz+2*RADIUS; i++) { for (int j=0; j < dimy+2*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } void printMatrixTemporal(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i=0; i < dimz+4*RADIUS; i++) { for (int j=0; j < dimy+4*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } bool checkResult(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i = 0; i < dimz+2*RADIUS; i++) { for (int j = 0; j < dimy+2*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } bool checkResultTemporal(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i = 0; i < dimz+4*RADIUS; i++) { for (int j = 0; j < dimy+4*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) 
{ index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } int main(int argc, char* argv[]) { float *h_a, *h_gold_a; float *d_a, *d_b; float hcoeff[RADIUS*6+1] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; hipEvent_t t0, t1, t2, t3, t4, t5; float init, host_comp, host2gpu, gpu2host, gpu_comp, tot; int dimx, dimy, dimz, t_end; long points, flop; float gFlops; int opt; // Variable to select the optimization char vbs = 0; if (argc == 7) { vbs = 1; } else { if (argc != 6) { printf("use: <exec> <OPT> <DIMX> <DIMY> <DIMZ> <T_END> <VBS(1)>\n" "Available optimizations (value should be used as the first parameter in the command line):\n" "0 - Base -> no optimization\n" "1 - Sham -> shared memory\n" "2 - ZintReg -> for iteration on Z axis (Paulius)\n" "3 - Zint -> for iteration on Z axis without using registers\n" "4 - ShamZintReg -> shared memory + for iteration on Z axis\n" "5 - ShamZint -> shared memory + for iteration on Z axis without registers\n" "6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking\n" "7 - Roc -> use of read only cache (__restrict__ and const modifiers)\n" "8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers)\n" "9 - RocZintReg -> for iteration on Z axis + read only cache\n" "10 - RocZint -> for iteration on Z axis without registers + read only cache\n" "11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking\n" ); exit(-1); } } opt = atoi(argv[1]); dimx = atoi(argv[2]); dimy = atoi(argv[3]); dimz = atoi(argv[4]); t_end = atoi(argv[5]); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); hipEventCreate(&t0); hipEventCreate(&t1); hipEventCreate(&t2); hipEventCreate(&t3); hipEventCreate(&t4); hipEventCreate(&t5); int pitchedDimx = dimx + 2*PADDING_SIZE; int gold_size; // If temporal blocking is requested, allocate more device memory if ( (opt == 6) || (opt == 11) ) { gold_size = pitchedDimx * (dimy+4*RADIUS) * (dimz+4*RADIUS) * sizeof(float); // Check if the number of iterations is even if ( (t_end%2) != 0) { if (vbs == 0) printf("Number of time iterations is odd, adding one iteration!\n"); t_end++; } } else { gold_size = pitchedDimx * (dimy+2*RADIUS) * (dimz+2*RADIUS) * sizeof(float); } points = (long)dimx * (long)dimy * (long)dimz * (long)t_end; flop = (long)(30 + 31) * points; // 30 adds, 31 multiplies hipEventRecord(t0); /* allocate device variables */ wbCheck(hipMalloc((void**) &d_a, gold_size)); wbCheck(hipMalloc((void**) &d_b, gold_size)); /* allocate host variables */ h_a = (float *)malloc(gold_size); h_gold_a = (float *)malloc(gold_size); if ( (opt == 6) || (opt == 11) ) { initGoldTemporal(h_a, dimx, dimy, dimz, pitchedDimx); initGoldTemporal(h_gold_a, dimx, dimy, dimz, pitchedDimx); } else { initGold(h_a, dimx, dimy, dimz, pitchedDimx); initGold(h_gold_a, dimx, dimy, dimz, pitchedDimx); } hipEventRecord(t1); if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { hostStencilTemporal(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } else { hostStencil(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } } #ifdef PRINT_GOLD if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_gold_a, pitchedDimx, dimy, dimz); } else { printMatrix(h_gold_a, pitchedDimx, dimy, dimz); } #endif hipEventRecord(t2); wbCheck(hipMemcpyToSymbol(coeff, hcoeff, sizeof(hcoeff))); 
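// Sketch (not in the original benchmark): the kernels assume the data grid is a multiple of
// the block size (see the "Known limitations" note in the source header), so a defensive
// check along these lines could be placed here before the device copies.
if ((dimx % BLOCK_DIMX) != 0 || (dimy % BLOCK_DIMY) != 0) {
  printf("Warning: DIMX/DIMY are not multiples of %dx%d; results may be invalid.\n",
         BLOCK_DIMX, BLOCK_DIMY);
}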
wbCheck(hipMemcpy(d_a, h_a, gold_size, hipMemcpyHostToDevice)); // Initialize device values wbCheck(hipMemcpy(d_b, d_a, gold_size, hipMemcpyDeviceToDevice)); // Copy contents from d_a to d_b hipEventRecord(t3); dim3 dimBlock; dim3 dimGrid; switch (opt) { case 0: if (vbs == 0) printf("Optimization level: 0 - Base\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilBase) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilBase) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 1: if (vbs == 0) printf("Optimization level: 1 - Sham\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilSham) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilSham) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 2: if (vbs == 0) printf("Optimization level: 2 - ZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 3: if (vbs == 0) printf("Optimization level: 3 - Zint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 4: if (vbs == 0) printf("Optimization level: 4 - ShamZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 5: if (vbs == 0) printf("Optimization level: 5 - ShamZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, 
dimy, dimz); } wbCheck(hipGetLastError()); } break; case 6: if (vbs == 0) printf("Optimization level: 6 - ShamZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 7: if (vbs == 0) printf("Optimization level: 7 - Roc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 8: if (vbs == 0) printf("Optimization level: 8 - ShamRoc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilShamRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 9: if (vbs == 0) printf("Optimization level: 9 - RocZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRocZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilRocZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 10: if (vbs == 0) printf("Optimization level: 10 - RocZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRocZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilRocZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 11: if (vbs == 0) printf("Optimization level: 11 - ShamRocZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamRocZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamRocZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; default: printf("Invalid 
optimization selected\n"); break; } hipEventRecord(t4); hipDeviceSynchronize(); if ( (opt == 6) || (opt == 11) ) { if ((t_end/2)%2) { wbCheck(hipMemcpy(h_a, d_b, gold_size, hipMemcpyDeviceToHost)); } else { wbCheck(hipMemcpy(h_a, d_a, gold_size, hipMemcpyDeviceToHost)); } } else { if (t_end%2) { wbCheck(hipMemcpy(h_a, d_b, gold_size, hipMemcpyDeviceToHost)); } else { wbCheck(hipMemcpy(h_a, d_a, gold_size, hipMemcpyDeviceToHost)); } } hipEventRecord(t5); hipFree(d_a); hipFree(d_b); #ifdef PRINT_RESULT if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_a,pitchedDimx,dimy,dimz); } else { printMatrix(h_a,pitchedDimx,dimy,dimz); } #endif if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { if (checkResultTemporal(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } else { if (checkResult(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } } hipEventSynchronize(t5); hipEventElapsedTime(&init, t0, t1); hipEventElapsedTime(&host_comp, t1, t2); hipEventElapsedTime(&host2gpu, t2, t3); hipEventElapsedTime(&gpu_comp, t3, t4); hipEventElapsedTime(&gpu2host, t4, t5); hipEventElapsedTime(&tot, t0, t5); gFlops = (1.0e-6)*flop/gpu_comp; free(h_a); free(h_gold_a); if (vbs == 0) { printf("GPU Clock: %d MHz\n",prop.clockRate/1000); printf("DIM = %dx%dx%d; T_END = %d; BLOCK_WIDTH = %dx%dx%d\n", dimx,dimy,dimz,t_end,BLOCK_DIMX,BLOCK_DIMY,BLOCK_DIMZ); printf("init=%f, host_comp=%f, host2gpu=%f, gpu_comp=%f, gpu2host=%f, tot=%f \n", init, host_comp, host2gpu, gpu_comp, gpu2host, tot); printf("Stencil Throughput: %f Gpts/s\n", (1.0e-6*points)/gpu_comp); // gpu_comp is measured in ms printf("gFlops = %f GFLOPs\n", gFlops); printf("\n"); } else { printf("%d,%d,%d,%f,%f\n", dimx,dimy,dimz,gFlops,gpu_comp); } return 0; }
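/* Launch-configuration note (sketch, not part of the original source): main() sizes the grid
   with (int)ceil(dimx/BLOCK_DIMX), where dimx/BLOCK_DIMX is integer division, so the ceil()
   has no effect; this is only correct when the data grid is an exact multiple of the block
   size, matching the stated limitation. A rounding-up helper would instead look like: */
static inline int divUp(int n, int block) {
  return (n + block - 1) / block; // ceil(n / block) for positive integers
}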
// CUDA source file: 0c2aff43088ae97c6d44353ef1e2501f89e0bc05.cu
/* Available optimizations (value should be used as the first parameter in the command line): 0 - Base -> no optimization 1 - Sham -> shared memory 2 - ZintReg -> for iteration on Z axis (Paulius) 3 - Zint -> for iteration on Z axis without using registers 4 - ShamZintReg -> shared memory + for iteration on Z axis 5 - ShamZint -> shared memory + for iteration on Z axis without registers 6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking 7 - Roc -> use of read only cache (__restrict__ and const modifiers) 8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers) 9 - RocZintReg -> for iteration on Z axis + read only cache 10 - RocZint -> for iteration on Z axis without registers + read only cache 11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking Known limitations: data grid size must be multiple of BLOCK_SIZE */ #include <stdio.h> //#define PRINT_GOLD //#define PRINT_RESULT #define BLOCK_DIMX 32 #define BLOCK_DIMY 16 #define BLOCK_DIMZ 1 #define RADIUS 5 // Half of the order #define PADDING_SIZE 32 // Error checking function #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ printf("ERROR: Failed to run stmt %s\n", #stmt); \ printf("ERROR: Got CUDA error ... %s\n", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) __constant__ float coeff[RADIUS*6+1]; /* Optimization Base: baseline code (no optimization) */ __global__ void calcStencilBase(float *a, float *b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * a[index] + coeff[1] * a[index - 5] + coeff[2] * a[index - 4] + coeff[3] * a[index - 3] + coeff[4] * a[index - 2] + coeff[5] * a[index - 1] + coeff[6] * a[index + 1] + coeff[7] * a[index + 2] + coeff[8] * a[index + 3] + coeff[9] * a[index + 4] + coeff[10] * a[index + 5] + coeff[11] * a[index - 5*pitchedDimx] + coeff[12] * a[index - 4*pitchedDimx] + coeff[13] * a[index - 3*pitchedDimx] + coeff[14] * a[index - 2*pitchedDimx] + coeff[15] * a[index - pitchedDimx] + coeff[16] * a[index + pitchedDimx] + coeff[17] * a[index + 2*pitchedDimx] + coeff[18] * a[index + 3*pitchedDimx] + coeff[19] * a[index + 4*pitchedDimx] + coeff[20] * a[index + 5*pitchedDimx] + coeff[21] * a[index - 5*stride] + coeff[22] * a[index - 4*stride] + coeff[23] * a[index - 3*stride] + coeff[24] * a[index - 2*stride] + coeff[25] * a[index - stride] + coeff[26] * a[index + stride] + coeff[27] * a[index + 2*stride] + coeff[28] * a[index + 3*stride] + coeff[29] * a[index + 4*stride] + coeff[30] * a[index + 5*stride]; } /* Optimization Sham: shared memory */ __global__ void calcStencilSham(float *a, float *b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * 
pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[index - (RADIUS*pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[index + (BLOCK_DIMY*pitchedDimx)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[index]; __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * a[index - 5*stride] + coeff[22] * a[index - 4*stride] + coeff[23] * a[index - 3*stride] + coeff[24] * a[index - 2*stride] + coeff[25] * a[index - stride] + coeff[26] * a[index + stride] + coeff[27] * a[index + 2*stride] + coeff[28] * a[index + 3*stride] + coeff[29] * a[index + 4*stride] + coeff[30] * a[index + 5*stride]; } /* Optimization ZintReg: for iteration on Z axis with registers */ __global__ void calcStencilZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind5 will be loaded inside the next 'for') behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = a[in_index]; in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * a[out_index - 5] + coeff[2] * a[out_index - 4] + coeff[3] * a[out_index - 3] + coeff[4] * a[out_index - 2] + coeff[5] * 
a[out_index - 1] + coeff[6] * a[out_index + 1] + coeff[7] * a[out_index + 2] + coeff[8] * a[out_index + 3] + coeff[9] * a[out_index + 4] + coeff[10] * a[out_index + 5] + coeff[11] * a[out_index - 5*pitchedDimx] + coeff[12] * a[out_index - 4*pitchedDimx] + coeff[13] * a[out_index - 3*pitchedDimx] + coeff[14] * a[out_index - 2*pitchedDimx] + coeff[15] * a[out_index - pitchedDimx] + coeff[16] * a[out_index + pitchedDimx] + coeff[17] * a[out_index + 2*pitchedDimx] + coeff[18] * a[out_index + 3*pitchedDimx] + coeff[19] * a[out_index + 4*pitchedDimx] + coeff[20] * a[out_index + 5*pitchedDimx] + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization Zint: for iteration on Z axis without using registers */ __global__ void calcStencilZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * a[out_index] + coeff[1] * a[out_index - 5] + coeff[2] * a[out_index - 4] + coeff[3] * a[out_index - 3] + coeff[4] * a[out_index - 2] + coeff[5] * a[out_index - 1] + coeff[6] * a[out_index + 1] + coeff[7] * a[out_index + 2] + coeff[8] * a[out_index + 3] + coeff[9] * a[out_index + 4] + coeff[10] * a[out_index + 5] + coeff[11] * a[out_index - 5*pitchedDimx] + coeff[12] * a[out_index - 4*pitchedDimx] + coeff[13] * a[out_index - 3*pitchedDimx] + coeff[14] * a[out_index - 2*pitchedDimx] + coeff[15] * a[out_index - pitchedDimx] + coeff[16] * a[out_index + pitchedDimx] + coeff[17] * a[out_index + 2*pitchedDimx] + coeff[18] * a[out_index + 3*pitchedDimx] + coeff[19] * a[out_index + 4*pitchedDimx] + coeff[20] * a[out_index + 5*pitchedDimx] + coeff[21] * a[out_index - 5*stride] + coeff[22] * a[out_index - 4*stride] + coeff[23] * a[out_index - 3*stride] + coeff[24] * a[out_index - 2*stride] + coeff[25] * a[out_index - stride] + coeff[26] * a[out_index + stride] + coeff[27] * a[out_index + 2*stride] + coeff[28] * a[out_index + 3*stride] + coeff[29] * a[out_index + 4*stride] + coeff[30] * a[out_index + 5*stride]; } } /* Optimization ShamZintReg: for iteration on Z axis + use of shared memory */ __global__ void calcStencilShamZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load 
initial values (behind5 will be loaded inside the next 'for') behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = a[in_index]; in_index += stride; out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = current; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization ShamZint: for iteration on Z axis without registers + use of shared memory */ __global__ void calcStencilShamZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + 
RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[out_index]; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * a[out_index - 5*stride] + coeff[22] * a[out_index - 4*stride] + coeff[23] * a[out_index - 3*stride] + coeff[24] * a[out_index - 2*stride] + coeff[25] * a[out_index - stride] + coeff[26] * a[out_index + stride] + coeff[27] * a[out_index + 2*stride] + coeff[28] * a[out_index + 3*stride] + coeff[29] * a[out_index + 4*stride] + coeff[30] * a[out_index + 5*stride]; } } /* Optimization ShamZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamZintTempReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS][2]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float 
t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind5 = a[in_index]; // Z = -R = -5 in_index += stride; t0_behind4 = a[in_index]; // Z = -R+1 = -4 in_index += stride; t0_behind3 = a[in_index]; // Z = -R+2 = -3 in_index += stride; t0_behind2 = a[in_index]; // Z = -R+3 = -2 in_index += stride; t0_behind1 = a[in_index]; // Z = -R+4 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = a[in_index]; // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = a[in_index]; // Z = 1 in_index += stride; t0_infront2 = a[in_index]; // Z = 2 in_index += stride; t0_infront3 = a[in_index]; // Z = 3 in_index += stride; t0_infront4 = a[in_index]; // Z = 4 in_index += stride; t0_infront5 = a[in_index]; // Z = R = 5 in_index += stride; // Load Z = 0 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[out_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; 
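// The t+1 "behind" registers are seeded with the untouched time-t ghost planes: ghost zones are
// never recomputed, so their t+1 values equal their t values (0.0 in the gold initialization),
// and the first t+2 stencils read correct boundary data through t1_behind1..t1_behind5.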
__syncthreads(); t0_behind5 = t0_behind4; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; // Load Z = 1 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront1 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 2 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + 
coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront2 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 3 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront3 = t0_current; } __syncthreads(); t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 4 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < 
RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront4 = t0_current; } __syncthreads(); for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = a[in_index]; in_index += stride; next_index += stride; // Load Z = R+i to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 5][1] + coeff[2] * ds_a[ty][sharedTx - 4][1] + coeff[3] * ds_a[ty][sharedTx - 3][1] + coeff[4] * ds_a[ty][sharedTx - 2][1] + coeff[5] * ds_a[ty][sharedTx - 1][1] + coeff[6] * ds_a[ty][sharedTx + 1][1] + coeff[7] * ds_a[ty][sharedTx + 2][1] + coeff[8] * ds_a[ty][sharedTx + 3][1] + coeff[9] * ds_a[ty][sharedTx + 4][1] + coeff[10] * ds_a[ty][sharedTx + 5][1] + coeff[11] * ds_a[ty - 5][sharedTx][1] + coeff[12] * ds_a[ty - 4][sharedTx][1] + coeff[13] * ds_a[ty - 3][sharedTx][1] + coeff[14] * ds_a[ty - 2][sharedTx][1] + coeff[15] * ds_a[ty - 1][sharedTx][1] + coeff[16] * ds_a[ty + 1][sharedTx][1] + coeff[17] * ds_a[ty + 2][sharedTx][1] + coeff[18] * ds_a[ty + 3][sharedTx][1] + coeff[19] * ds_a[ty + 4][sharedTx][1] + coeff[20] * ds_a[ty + 5][sharedTx][1] + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + 
coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront5 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[ty][sharedTx][0] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[ty][sharedTx - 5][0] + coeff[2] * ds_a[ty][sharedTx - 4][0] + coeff[3] * ds_a[ty][sharedTx - 3][0] + coeff[4] * ds_a[ty][sharedTx - 2][0] + coeff[5] * ds_a[ty][sharedTx - 1][0] + coeff[6] * ds_a[ty][sharedTx + 1][0] + coeff[7] * ds_a[ty][sharedTx + 2][0] + coeff[8] * ds_a[ty][sharedTx + 3][0] + coeff[9] * ds_a[ty][sharedTx + 4][0] + coeff[10] * ds_a[ty][sharedTx + 5][0] + coeff[11] * ds_a[ty - 5][sharedTx][0] + coeff[12] * ds_a[ty - 4][sharedTx][0] + coeff[13] * ds_a[ty - 3][sharedTx][0] + coeff[14] * ds_a[ty - 2][sharedTx][0] + coeff[15] * ds_a[ty - 1][sharedTx][0] + coeff[16] * ds_a[ty + 1][sharedTx][0] + coeff[17] * ds_a[ty + 2][sharedTx][0] + coeff[18] * ds_a[ty + 3][sharedTx][0] + coeff[19] * ds_a[ty + 4][sharedTx][0] + coeff[20] * ds_a[ty + 5][sharedTx][0] + coeff[21] * t1_behind5 + coeff[22] * t1_behind4 + coeff[23] * t1_behind3 + coeff[24] * t1_behind2 + coeff[25] * t1_behind1 + coeff[26] * t1_infront1 + coeff[27] * t1_infront2 + coeff[28] * t1_infront3 + coeff[29] * t1_infront4 + coeff[30] * t1_infront5; } out_index += stride; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; } } /* Optimization Roc: use of read only cache (texture memory) */ __global__ void calcStencilRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * __ldg(&a[index]) + coeff[1] * __ldg(&a[index - 5]) + coeff[2] * __ldg(&a[index - 4]) + coeff[3] * __ldg(&a[index - 3]) + coeff[4] * __ldg(&a[index - 2]) + coeff[5] * __ldg(&a[index - 1]) + coeff[6] * __ldg(&a[index + 1]) + coeff[7] * __ldg(&a[index + 2]) + coeff[8] * __ldg(&a[index + 3]) + coeff[9] * __ldg(&a[index + 4]) + coeff[10] * __ldg(&a[index + 5]) + coeff[11] * __ldg(&a[index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[index - pitchedDimx]) + coeff[16] * __ldg(&a[index + pitchedDimx]) + coeff[17] * __ldg(&a[index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[index + 5*pitchedDimx]) + coeff[21] * __ldg(&a[index - 5*stride]) + coeff[22] * __ldg(&a[index - 4*stride]) + coeff[23] * __ldg(&a[index - 3*stride]) + coeff[24] * __ldg(&a[index - 2*stride]) + coeff[25] * __ldg(&a[index - stride]) + coeff[26] * __ldg(&a[index + stride]) + coeff[27] * __ldg(&a[index + 2*stride]) + coeff[28] * __ldg(&a[index + 
3*stride]) + coeff[29] * __ldg(&a[index + 4*stride]) + coeff[30] * __ldg(&a[index + 5*stride]); } /* Optimization ShamRoc: use of shared memory + read only cache (texture memory) */ __global__ void calcStencilShamRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = __ldg(&a[index - (RADIUS*pitchedDimx)]); ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = __ldg(&a[index + (BLOCK_DIMY*pitchedDimx)]); } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = __ldg(&a[index - RADIUS]); ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = __ldg(&a[index + BLOCK_DIMX]); } // Load current position to shared memory ds_a[ty][sharedTx] = __ldg(&a[index]); __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 5] + coeff[2] * ds_a[ty][sharedTx - 4] + coeff[3] * ds_a[ty][sharedTx - 3] + coeff[4] * ds_a[ty][sharedTx - 2] + coeff[5] * ds_a[ty][sharedTx - 1] + coeff[6] * ds_a[ty][sharedTx + 1] + coeff[7] * ds_a[ty][sharedTx + 2] + coeff[8] * ds_a[ty][sharedTx + 3] + coeff[9] * ds_a[ty][sharedTx + 4] + coeff[10] * ds_a[ty][sharedTx + 5] + coeff[11] * ds_a[ty - 5][sharedTx] + coeff[12] * ds_a[ty - 4][sharedTx] + coeff[13] * ds_a[ty - 3][sharedTx] + coeff[14] * ds_a[ty - 2][sharedTx] + coeff[15] * ds_a[ty - 1][sharedTx] + coeff[16] * ds_a[ty + 1][sharedTx] + coeff[17] * ds_a[ty + 2][sharedTx] + coeff[18] * ds_a[ty + 3][sharedTx] + coeff[19] * ds_a[ty + 4][sharedTx] + coeff[20] * ds_a[ty + 5][sharedTx] + coeff[21] * __ldg(&a[index - 5*stride]) + coeff[22] * __ldg(&a[index - 4*stride]) + coeff[23] * __ldg(&a[index - 3*stride]) + coeff[24] * __ldg(&a[index - 2*stride]) + coeff[25] * __ldg(&a[index - stride]) + coeff[26] * __ldg(&a[index + stride]) + coeff[27] * __ldg(&a[index + 2*stride]) + coeff[28] * __ldg(&a[index + 3*stride]) + coeff[29] * __ldg(&a[index + 4*stride]) + coeff[30] * __ldg(&a[index + 5*stride]); } /* Optimization RocZintReg: use of iteration on Z axis + read only cache (texture memory) */ __global__ void calcStencilRocZintReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind5 will be loaded inside the next 'for') behind4 = __ldg(&a[in_index]); in_index 
+= stride; behind3 = __ldg(&a[in_index]); in_index += stride; behind2 = __ldg(&a[in_index]); in_index += stride; behind1 = __ldg(&a[in_index]); in_index += stride; current = __ldg(&a[in_index]); out_index = in_index; in_index += stride; infront1 = __ldg(&a[in_index]); in_index += stride; infront2 = __ldg(&a[in_index]); in_index += stride; infront3 = __ldg(&a[in_index]); in_index += stride; infront4 = __ldg(&a[in_index]); in_index += stride; infront5 = __ldg(&a[in_index]); in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = __ldg(&a[in_index]); in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * behind5 + coeff[22] * behind4 + coeff[23] * behind3 + coeff[24] * behind2 + coeff[25] * behind1 + coeff[26] * infront1 + coeff[27] * infront2 + coeff[28] * infront3 + coeff[29] * infront4 + coeff[30] * infront5; } } /* Optimization RocZint: use of iteration on Z axis without registers + read only cache (texture memory) */ __global__ void calcStencilRocZint(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for reading Z values out_index += 4*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * __ldg(&a[out_index]) + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + 
coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * __ldg(&a[out_index - 5*stride]) + coeff[22] * __ldg(&a[out_index - 4*stride]) + coeff[23] * __ldg(&a[out_index - 3*stride]) + coeff[24] * __ldg(&a[out_index - 2*stride]) + coeff[25] * __ldg(&a[out_index - stride]) + coeff[26] * __ldg(&a[out_index + stride]) + coeff[27] * __ldg(&a[out_index + 2*stride]) + coeff[28] * __ldg(&a[out_index + 3*stride]) + coeff[29] * __ldg(&a[out_index + 4*stride]) + coeff[30] * __ldg(&a[out_index + 5*stride]); } } /* Optimization ShamRocZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamRocZintTempReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY][BLOCK_DIMX]; int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind5 = __ldg(&a[in_index]); // Z = -R = -5 in_index += stride; 
t0_behind4 = __ldg(&a[in_index]); // Z = -R+1 = -4 in_index += stride; t0_behind3 = __ldg(&a[in_index]); // Z = -R+2 = -3 in_index += stride; t0_behind2 = __ldg(&a[in_index]); // Z = -R+3 = -2 in_index += stride; t0_behind1 = __ldg(&a[in_index]); // Z = -R+4 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = __ldg(&a[in_index]); // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = __ldg(&a[in_index]); // Z = 1 in_index += stride; t0_infront2 = __ldg(&a[in_index]); // Z = 2 in_index += stride; t0_infront3 = __ldg(&a[in_index]); // Z = 3 in_index += stride; t0_infront4 = __ldg(&a[in_index]); // Z = 4 in_index += stride; t0_infront5 = __ldg(&a[in_index]); // Z = R = 5 in_index += stride; // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * __ldg(&a[out_index - 5]) + coeff[2] * __ldg(&a[out_index - 4]) + coeff[3] * __ldg(&a[out_index - 3]) + coeff[4] * __ldg(&a[out_index - 2]) + coeff[5] * __ldg(&a[out_index - 1]) + coeff[6] * __ldg(&a[out_index + 1]) + coeff[7] * __ldg(&a[out_index + 2]) + coeff[8] * __ldg(&a[out_index + 3]) + coeff[9] * __ldg(&a[out_index + 4]) + coeff[10] * __ldg(&a[out_index + 5]) + coeff[11] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - pitchedDimx]) + coeff[16] * __ldg(&a[out_index + pitchedDimx]) + coeff[17] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 
pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront1 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront2 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + 
coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront3 = t0_current; } t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront4 = t0_current; } for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 5]) + coeff[2] * __ldg(&a[next_index - 4]) + coeff[3] * __ldg(&a[next_index - 3]) + coeff[4] * __ldg(&a[next_index - 2]) + coeff[5] * __ldg(&a[next_index - 1]) + coeff[6] * __ldg(&a[next_index + 1]) + coeff[7] * __ldg(&a[next_index + 2]) + coeff[8] * __ldg(&a[next_index + 3]) + coeff[9] * __ldg(&a[next_index + 4]) + coeff[10] * __ldg(&a[next_index + 5]) + coeff[11] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[12] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[13] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 
2*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - pitchedDimx]) + coeff[16] * __ldg(&a[next_index + pitchedDimx]) + coeff[17] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[19] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[21] * t0_behind5 + coeff[22] * t0_behind4 + coeff[23] * t0_behind3 + coeff[24] * t0_behind2 + coeff[25] * t0_behind1 + coeff[26] * t0_infront1 + coeff[27] * t0_infront2 + coeff[28] * t0_infront3 + coeff[29] * t0_infront4 + coeff[30] * t0_infront5; } else { t1_infront5 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[threadIdx.y][threadIdx.x] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[threadIdx.y][threadIdx.x - 5] + coeff[2] * ds_a[threadIdx.y][threadIdx.x - 4] + coeff[3] * ds_a[threadIdx.y][threadIdx.x - 3] + coeff[4] * ds_a[threadIdx.y][threadIdx.x - 2] + coeff[5] * ds_a[threadIdx.y][threadIdx.x - 1] + coeff[6] * ds_a[threadIdx.y][threadIdx.x + 1] + coeff[7] * ds_a[threadIdx.y][threadIdx.x + 2] + coeff[8] * ds_a[threadIdx.y][threadIdx.x + 3] + coeff[9] * ds_a[threadIdx.y][threadIdx.x + 4] + coeff[10] * ds_a[threadIdx.y][threadIdx.x + 5] + coeff[11] * ds_a[threadIdx.y - 5][threadIdx.x] + coeff[12] * ds_a[threadIdx.y - 4][threadIdx.x] + coeff[13] * ds_a[threadIdx.y - 3][threadIdx.x] + coeff[14] * ds_a[threadIdx.y - 2][threadIdx.x] + coeff[15] * ds_a[threadIdx.y - 1][threadIdx.x] + coeff[16] * ds_a[threadIdx.y + 1][threadIdx.x] + coeff[17] * ds_a[threadIdx.y + 2][threadIdx.x] + coeff[18] * ds_a[threadIdx.y + 3][threadIdx.x] + coeff[19] * ds_a[threadIdx.y + 4][threadIdx.x] + coeff[20] * ds_a[threadIdx.y + 5][threadIdx.x] + coeff[21] * t1_behind5 + coeff[22] * t1_behind4 + coeff[23] * t1_behind3 + coeff[24] * t1_behind2 + coeff[25] * t1_behind1 + coeff[26] * t1_infront1 + coeff[27] * t1_infront2 + coeff[28] * t1_infront3 + coeff[29] * t1_infront4 + coeff[30] * t1_infront5; } out_index += stride; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; } } void initGold(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+2*RADIUS); int index = 0; for (int i = 0; i < (dimz+2*RADIUS); i++) { for (int j = 0; j < (dimy+2*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (i<RADIUS || j<RADIUS || i>=dimz+RADIUS || j>=dimy+RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void initGoldTemporal(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+4*RADIUS); int index = 0; for (int i = 0; i < (dimz+4*RADIUS); i++) { for (int j = 0; j < (dimy+4*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if ( i<2*RADIUS || j<2*RADIUS || i>=dimz+2*RADIUS || j>=dimy+2*RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE ) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void hostStencil(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * 
(dimy+2*RADIUS);
  b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float));
  initGold(b, dimx, dimy, dimz, pitchedDimx);
  int index = 0;
  for (int t = 0; t < t_end; t++) {
    for (int i = RADIUS; i < dimz+RADIUS; i++) {
      for (int j = RADIUS; j < dimy+RADIUS; j++) {
        for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) {
          index = i*stride + j*pitchedDimx + k;
          if (t%2) {
            a[index] = hcoeff[0] * b[index] +
              hcoeff[1] * b[index - 5] + hcoeff[2] * b[index - 4] + hcoeff[3] * b[index - 3] + hcoeff[4] * b[index - 2] + hcoeff[5] * b[index - 1] +
              hcoeff[6] * b[index + 1] + hcoeff[7] * b[index + 2] + hcoeff[8] * b[index + 3] + hcoeff[9] * b[index + 4] + hcoeff[10] * b[index + 5] +
              hcoeff[11] * b[index - 5*pitchedDimx] + hcoeff[12] * b[index - 4*pitchedDimx] + hcoeff[13] * b[index - 3*pitchedDimx] + hcoeff[14] * b[index - 2*pitchedDimx] + hcoeff[15] * b[index - pitchedDimx] +
              hcoeff[16] * b[index + pitchedDimx] + hcoeff[17] * b[index + 2*pitchedDimx] + hcoeff[18] * b[index + 3*pitchedDimx] + hcoeff[19] * b[index + 4*pitchedDimx] + hcoeff[20] * b[index + 5*pitchedDimx] +
              hcoeff[21] * b[index - 5*stride] + hcoeff[22] * b[index - 4*stride] + hcoeff[23] * b[index - 3*stride] + hcoeff[24] * b[index - 2*stride] + hcoeff[25] * b[index - stride] +
              hcoeff[26] * b[index + stride] + hcoeff[27] * b[index + 2*stride] + hcoeff[28] * b[index + 3*stride] + hcoeff[29] * b[index + 4*stride] + hcoeff[30] * b[index + 5*stride];
          } else {
            b[index] = hcoeff[0] * a[index] +
              hcoeff[1] * a[index - 5] + hcoeff[2] * a[index - 4] + hcoeff[3] * a[index - 3] + hcoeff[4] * a[index - 2] + hcoeff[5] * a[index - 1] +
              hcoeff[6] * a[index + 1] + hcoeff[7] * a[index + 2] + hcoeff[8] * a[index + 3] + hcoeff[9] * a[index + 4] + hcoeff[10] * a[index + 5] +
              hcoeff[11] * a[index - 5*pitchedDimx] + hcoeff[12] * a[index - 4*pitchedDimx] + hcoeff[13] * a[index - 3*pitchedDimx] + hcoeff[14] * a[index - 2*pitchedDimx] + hcoeff[15] * a[index - pitchedDimx] +
              hcoeff[16] * a[index + pitchedDimx] + hcoeff[17] * a[index + 2*pitchedDimx] + hcoeff[18] * a[index + 3*pitchedDimx] + hcoeff[19] * a[index + 4*pitchedDimx] + hcoeff[20] * a[index + 5*pitchedDimx] +
              hcoeff[21] * a[index - 5*stride] + hcoeff[22] * a[index - 4*stride] + hcoeff[23] * a[index - 3*stride] + hcoeff[24] * a[index - 2*stride] + hcoeff[25] * a[index - stride] +
              hcoeff[26] * a[index + stride] + hcoeff[27] * a[index + 2*stride] + hcoeff[28] * a[index + 3*stride] + hcoeff[29] * a[index + 4*stride] + hcoeff[30] * a[index + 5*stride];
          }
        }
      }
    }
  }
  if (t_end%2) {
    for (int i = RADIUS; i < dimz+RADIUS; i++) {
      for (int j = RADIUS; j < dimy+RADIUS; j++) {
        for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) {
          index = i*stride + j*pitchedDimx + k;
          a[index] = b[index];
        }
      }
    }
  }
  free(b);
}

void hostStencilTemporal(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) {
  float *b;
  int stride = pitchedDimx * (dimy+4*RADIUS);
  // The temporal variants carry 2*RADIUS ghost planes on each side of Z, so the
  // scratch buffer must hold dimz+4*RADIUS planes (initGoldTemporal fills all of them)
  b = (float *)malloc((dimz+4*RADIUS) * stride * sizeof(float));
  initGoldTemporal(b, dimx, dimy, dimz, pitchedDimx);
  int index = 0;
  for (int t = 0; t < t_end; t++) {
    for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) {
      for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) {
        for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) {
          index = i*stride + j*pitchedDimx + k;
          if (t%2) {
            a[index] = hcoeff[0] * b[index] +
              hcoeff[1] * b[index - 5] + hcoeff[2] * b[index - 4] + hcoeff[3] * b[index - 3] + hcoeff[4] * b[index - 2] + hcoeff[5] * b[index - 1] +
              hcoeff[6] * b[index + 1] + hcoeff[7] * b[index + 2] + hcoeff[8] * b[index + 3] + hcoeff[9] * b[index + 4] + hcoeff[10] *
b[index + 5] + hcoeff[11] * b[index - 5*pitchedDimx] + hcoeff[12] * b[index - 4*pitchedDimx] + hcoeff[13] * b[index - 3*pitchedDimx] + hcoeff[14] * b[index - 2*pitchedDimx] + hcoeff[15] * b[index - pitchedDimx] + hcoeff[16] * b[index + pitchedDimx] + hcoeff[17] * b[index + 2*pitchedDimx] + hcoeff[18] * b[index + 3*pitchedDimx] + hcoeff[19] * b[index + 4*pitchedDimx] + hcoeff[20] * b[index + 5*pitchedDimx] + hcoeff[21] * b[index - 5*stride] + hcoeff[22] * b[index - 4*stride] + hcoeff[23] * b[index - 3*stride] + hcoeff[24] * b[index - 2*stride] + hcoeff[25] * b[index - stride] + hcoeff[26] * b[index + stride] + hcoeff[27] * b[index + 2*stride] + hcoeff[28] * b[index + 3*stride] + hcoeff[29] * b[index + 4*stride] + hcoeff[30] * b[index + 5*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 5] + hcoeff[2] * a[index - 4] + hcoeff[3] * a[index - 3] + hcoeff[4] * a[index - 2] + hcoeff[5] * a[index - 1] + hcoeff[6] * a[index + 1] + hcoeff[7] * a[index + 2] + hcoeff[8] * a[index + 3] + hcoeff[9] * a[index + 4] + hcoeff[10] * a[index + 5] + hcoeff[11] * a[index - 5*pitchedDimx] + hcoeff[12] * a[index - 4*pitchedDimx] + hcoeff[13] * a[index - 3*pitchedDimx] + hcoeff[14] * a[index - 2*pitchedDimx] + hcoeff[15] * a[index - pitchedDimx] + hcoeff[16] * a[index + pitchedDimx] + hcoeff[17] * a[index + 2*pitchedDimx] + hcoeff[18] * a[index + 3*pitchedDimx] + hcoeff[19] * a[index + 4*pitchedDimx] + hcoeff[20] * a[index + 5*pitchedDimx] + hcoeff[21] * a[index - 5*stride] + hcoeff[22] * a[index - 4*stride] + hcoeff[23] * a[index - 3*stride] + hcoeff[24] * a[index - 2*stride] + hcoeff[25] * a[index - stride] + hcoeff[26] * a[index + stride] + hcoeff[27] * a[index + 2*stride] + hcoeff[28] * a[index + 3*stride] + hcoeff[29] * a[index + 4*stride] + hcoeff[30] * a[index + 5*stride]; } } } } } if (t_end%2) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void printMatrix(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i=0; i < dimz+2*RADIUS; i++) { for (int j=0; j < dimy+2*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } void printMatrixTemporal(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i=0; i < dimz+4*RADIUS; i++) { for (int j=0; j < dimy+4*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } bool checkResult(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i = 0; i < dimz+2*RADIUS; i++) { for (int j = 0; j < dimy+2*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } bool checkResultTemporal(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i = 0; i < dimz+4*RADIUS; i++) { for (int j = 0; j < dimy+4*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { 
printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } int main(int argc, char* argv[]) { float *h_a, *h_gold_a; float *d_a, *d_b; float hcoeff[RADIUS*6+1] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; cudaEvent_t t0, t1, t2, t3, t4, t5; float init, host_comp, host2gpu, gpu2host, gpu_comp, tot; int dimx, dimy, dimz, t_end; long points, flop; float gFlops; int opt; // Variable to select the optimization char vbs = 0; if (argc == 7) { vbs = 1; } else { if (argc != 6) { printf("use: <exec> <OPT> <DIMX> <DIMY> <DIMZ> <T_END> <VBS(1)>\n" "Available optimizations (value should be used as the first parameter in the command line):\n" "0 - Base -> no optimization\n" "1 - Sham -> shared memory\n" "2 - ZintReg -> for iteration on Z axis (Paulius)\n" "3 - Zint -> for iteration on Z axis without using registers\n" "4 - ShamZintReg -> shared memory + for iteration on Z axis\n" "5 - ShamZint -> shared memory + for iteration on Z axis without registers\n" "6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking\n" "7 - Roc -> use of read only cache (__restrict__ and const modifiers)\n" "8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers)\n" "9 - RocZintReg -> for iteration on Z axis + read only cache\n" "10 - RocZint -> for iteration on Z axis without registers + read only cache\n" "11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking\n" ); exit(-1); } } opt = atoi(argv[1]); dimx = atoi(argv[2]); dimy = atoi(argv[3]); dimz = atoi(argv[4]); t_end = atoi(argv[5]); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); cudaEventCreate(&t0); cudaEventCreate(&t1); cudaEventCreate(&t2); cudaEventCreate(&t3); cudaEventCreate(&t4); cudaEventCreate(&t5); int pitchedDimx = dimx + 2*PADDING_SIZE; int gold_size; // If temporal blocking is requested, allocate more device memory if ( (opt == 6) || (opt == 11) ) { gold_size = pitchedDimx * (dimy+4*RADIUS) * (dimz+4*RADIUS) * sizeof(float); // Check if the number of iterations is even if ( (t_end%2) != 0) { if (vbs == 0) printf("Number of time iterations is odd, adding one iteration!\n"); t_end++; } } else { gold_size = pitchedDimx * (dimy+2*RADIUS) * (dimz+2*RADIUS) * sizeof(float); } points = (long)dimx * (long)dimy * (long)dimz * (long)t_end; flop = (long)(30 + 31) * points; // 30 adds, 31 multiplies cudaEventRecord(t0); /* allocate device variables */ wbCheck(cudaMalloc((void**) &d_a, gold_size)); wbCheck(cudaMalloc((void**) &d_b, gold_size)); /* allocate host variables */ h_a = (float *)malloc(gold_size); h_gold_a = (float *)malloc(gold_size); if ( (opt == 6) || (opt == 11) ) { initGoldTemporal(h_a, dimx, dimy, dimz, pitchedDimx); initGoldTemporal(h_gold_a, dimx, dimy, dimz, pitchedDimx); } else { initGold(h_a, dimx, dimy, dimz, pitchedDimx); initGold(h_gold_a, dimx, dimy, dimz, pitchedDimx); } cudaEventRecord(t1); if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { hostStencilTemporal(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } else { hostStencil(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } } #ifdef PRINT_GOLD if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_gold_a, pitchedDimx, dimy, dimz); } else { printMatrix(h_gold_a, pitchedDimx, dimy, dimz); } #endif cudaEventRecord(t2); wbCheck(cudaMemcpyToSymbol(coeff, hcoeff, sizeof(hcoeff))); wbCheck(cudaMemcpy(d_a, h_a, gold_size, cudaMemcpyHostToDevice)); // 
Initialize device values wbCheck(cudaMemcpy(d_b, d_a, gold_size, cudaMemcpyDeviceToDevice)); // Copy contents from d_a to d_b cudaEventRecord(t3); dim3 dimBlock; dim3 dimGrid; switch (opt) { case 0: if (vbs == 0) printf("Optimization level: 0 - Base\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilBase <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilBase <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 1: if (vbs == 0) printf("Optimization level: 1 - Sham\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilSham <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilSham <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 2: if (vbs == 0) printf("Optimization level: 2 - ZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 3: if (vbs == 0) printf("Optimization level: 3 - Zint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 4: if (vbs == 0) printf("Optimization level: 4 - ShamZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 5: if (vbs == 0) printf("Optimization level: 5 - ShamZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 6: if (vbs == 0) printf("Optimization level: 6 - ShamZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { calcStencilShamZintTempReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { 
calcStencilShamZintTempReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 7: if (vbs == 0) printf("Optimization level: 7 - Roc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRoc <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilRoc <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 8: if (vbs == 0) printf("Optimization level: 8 - ShamRoc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamRoc <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilShamRoc <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 9: if (vbs == 0) printf("Optimization level: 9 - RocZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRocZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilRocZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 10: if (vbs == 0) printf("Optimization level: 10 - RocZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRocZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilRocZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 11: if (vbs == 0) printf("Optimization level: 11 - ShamRocZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { calcStencilShamRocZintTempReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamRocZintTempReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; default: printf("Invalid optimization selected\n"); break; } cudaEventRecord(t4); cudaDeviceSynchronize(); if ( (opt == 6) || (opt == 11) ) { if ((t_end/2)%2) { wbCheck(cudaMemcpy(h_a, d_b, gold_size, cudaMemcpyDeviceToHost)); } else { wbCheck(cudaMemcpy(h_a, d_a, gold_size, cudaMemcpyDeviceToHost)); } } else { if (t_end%2) { wbCheck(cudaMemcpy(h_a, d_b, gold_size, cudaMemcpyDeviceToHost)); } else { wbCheck(cudaMemcpy(h_a, d_a, gold_size, cudaMemcpyDeviceToHost)); } } cudaEventRecord(t5); cudaFree(d_a); cudaFree(d_b); #ifdef PRINT_RESULT if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_a,pitchedDimx,dimy,dimz); } else { printMatrix(h_a,pitchedDimx,dimy,dimz); } #endif if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { if (checkResultTemporal(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } else { if 
(checkResult(h_a,h_gold_a,pitchedDimx,dimy,dimz)) {
        printf("Correct results!\n");
      } else {
        printf("Wrong results!!!!!!\n");
      }
    }
  }

  cudaEventSynchronize(t5);

  cudaEventElapsedTime(&init, t0, t1);
  cudaEventElapsedTime(&host_comp, t1, t2);
  cudaEventElapsedTime(&host2gpu, t2, t3);
  cudaEventElapsedTime(&gpu_comp, t3, t4);
  cudaEventElapsedTime(&gpu2host, t4, t5);
  cudaEventElapsedTime(&tot, t0, t5);

  gFlops = (1.0e-6)*flop/gpu_comp;

  free(h_a);
  free(h_gold_a);

  if (vbs == 0) {
    printf("GPU Clock: %d MHz\n",prop.clockRate/1000);
    printf("DIM = %dx%dx%d; T_END = %d; BLOCK_WIDTH = %dx%dx%d\n", dimx,dimy,dimz,t_end,BLOCK_DIMX,BLOCK_DIMY,BLOCK_DIMZ);
    printf("init=%f, host_comp=%f, host2gpu=%f, gpu_comp=%f, gpu2host=%f, tot=%f \n", init, host_comp, host2gpu, gpu_comp, gpu2host, tot);
    printf("Stencil Throughput: %f Gpts/s\n", (1.0e-6*points)/gpu_comp); // gpu_comp is measured in ms
    printf("gFlops = %f GFLOPs\n", gFlops);
    printf("\n");
  } else {
    printf("%d,%d,%d,%f,%f\n", dimx,dimy,dimz,gFlops,gpu_comp);
  }

  return 0;
}
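/* Example invocation (illustrative sketch, not part of the original benchmark):
   assuming the compiled binary is named "stencil", the ShamZintTempReg variant
   (option 6) on a 256x256x256 domain for 4 time steps would be launched as
       ./stencil 6 256 256 256 4
   and the same run with the optional trailing VBS argument, which switches the
   output to the single CSV-style line printed at the end of main(), as
       ./stencil 6 256 256 256 4 1
   The binary name and the problem sizes are assumptions; only the argument order
   follows the usage message printed by main(). */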
5b33cc6a49bb2a1771c52f29f08af5d10ce285f7.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ extern "C" { #include "bluebottle.h" } #include <hip/hip_runtime.h> #include "cuda_quadrature.h" extern "C" void cuda_U_star_test_exp(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nIntermediate velocity calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z)\n\n"); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_a[C] = nu; u_a[C] -= 2 * exp((i-1.5)*Dom.dx); u_a[C] -= exp((j-1.0)*Dom.dy); u_a[C] -= exp((k-1.0)*Dom.dz); u_a[C] *= dt * exp((i-1.5)*Dom.dx); u_a[C] += exp((i-1.5)*Dom.dx); u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_a[C] = nu; v_a[C] -= 2 * exp((j-1.5)*Dom.dy); v_a[C] -= exp((k-1.0)*Dom.dz); v_a[C] -= exp((i-1.0)*Dom.dx); v_a[C] *= dt * exp((j-1.5)*Dom.dy); v_a[C] += exp((j-1.5)*Dom.dy); v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < 
Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu; w_a[C] -= 2 * exp((k-1.5)*Dom.dz); w_a[C] -= exp((i-1.0)*Dom.dx); w_a[C] -= exp((j-1.0)*Dom.dy); w_a[C] *= dt * exp((k-1.5)*Dom.dz); w_a[C] += exp((k-1.5)*Dom.dz); w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input velocity fields for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u[C] = exp((i-1.5)*Dom.dx); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v[C] = exp((j-1.5)*Dom.dy); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w[C] = exp((k-1.5)*Dom.dz); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; u_e[C] = (u_c[C] - u_a[C]) / u_a[C]; if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; v_e[C] = (v_c[C] - v_a[C]) / v_a[C]; if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; w_e[C] = (w_c[C] - w_a[C]) / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); } extern "C" void cuda_U_star_test_cos(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected 
solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("Intermediate velocity calculation validation:\n\n"); printf(" u = cos(y), v = cos(z), w = cos(x)\n\n"); dt = 1; //dt0 = dt; printf("dt = %f, dt0 = %f\n", dt, dt0); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; /* u_a[C] = nu * sin((j-1.0)*Dom.dy); u_a[C] += cos((j-1.0)*Dom.dy) * sin((k-1.0)*Dom.dz); u_a[C] *= -dt; u_a[C] += sin((j-1.0)*Dom.dy); */ real x = 2.*PI*(i-1.0)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //u_a[C] = -4.*PI*sin(x)*cos(x)*sin(y)*sin(y); //u_a[C] += 2.*PI*sin(x)*cos(x)*(sin(y)*sin(y)-cos(y)*cos(y)); u_a[C] = 8.*PI*PI*nu*cos(x)*sin(y); //u_a[C] += PI*sin(2.*x); u_a[C] *= -dt; //u_a[C] = cos(x)*sin(y) * exp(-16.*PI*PI*1.0*dt); u[C] = u_a[C]; conv0_u[C] = -4.*PI*sin(x)*cos(x)*sin(y)*sin(y) + 2.*PI*sin(x)*cos(x) *(sin(y)*sin(y)-cos(y)*cos(y)); conv0_u[C] *= exp(16.*PI*PI*1.0*dt); #ifdef EXPLICIT diff0_u[C] = -8.*PI*PI*nu*cos(x)*sin(y); diff0_u[C] *= exp(16.*PI*PI*1.0*dt); #endif } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; /* v_a[C] = nu * sin((k-1.0)*Dom.dz); v_a[C] += cos((k-1.0)*Dom.dz) * sin((i-1.0)*Dom.dx); v_a[C] *= -dt; v_a[C] += sin((k-1.0)*Dom.dz); */ real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-1.0)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //v_a[C] = 2.*PI*cos(y)*sin(y)*(sin(x)*sin(x)-cos(x)*cos(x)); //v_a[C] += -4.*PI*sin(x)*sin(x)*cos(y)*sin(y); v_a[C] = -8.*PI*PI*nu*sin(x)*cos(y); // v_a[C] += PI*sin(2.*y); v_a[C] *= -dt; //v_a[C] = -sin(x)*cos(y) * exp(-16.*PI*PI*1.0*dt); v[C] = v_a[C]; conv0_v[C] = -4.*PI*sin(x)*sin(x)*sin(y)*cos(y) + 2.*PI*sin(y)*cos(y) *(sin(x)*sin(x)-cos(x)*cos(x)); conv0_v[C] *= exp(16.*PI*PI*1.0*dt); #ifdef EXPLICIT diff0_v[C] = 8.*PI*PI*nu*sin(x)*cos(y); diff0_v[C] *= exp(16.*PI*PI*1.0*dt); #endif } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu * sin((i-1.0)*Dom.dx); w_a[C] += cos((i-1.0)*Dom.dx) * sin((j-1.0)*Dom.dy); w_a[C] *= -dt; w_a[C] += sin((i-1.0)*Dom.dx); w_a[C] = 0; w[C] = w_a[C]; } } } // write expected 
solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input pressure and velocity fields for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; real x = 2.*PI*(i-1.0)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //u[C] = sin((j-1.0)*Dom.dy); u[C] = cos(x)*sin(y); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-1.0)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //v[C] = sin((k-1.0)*Dom.dz); v[C] = -sin(x)*cos(y); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; //w[C] = sin((i-1.0)*Dom.dx); w[C] = 0; } } } for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; p[C] = -0.25*rho_f*(cos(2.*x)+cos(2.*y)); p0[C] = -0.25*rho_f*(cos(2.*x)+cos(2.*y)); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error u_err_max = 0; for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u_star[C]; u_e[C] = (u_c[C] - u_a[C]);// / u_a[C]; u_err_max += fabs(u_e[C]); //if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } v_err_max = 0; for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v_star[C]; v_e[C] = (v_c[C] - v_a[C]);// / v_a[C]; v_err_max += fabs(v_e[C]); //if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w_star[C]; w_e[C] = (w_c[C] - w_a[C]);// / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum 
error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); } __global__ void memcpy_u_star_test(real *dst, real *src, dom_struct *dom) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { dst[i + j * dom->Gfx._s1b + k * dom->Gfx._s2b] = src[i + j * dom->Gfx._s1b + k * dom->Gfx._s2b]; } } __global__ void memcpy_v_star_test(real *dst, real *src, dom_struct *dom) { int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { dst[i + j * dom->Gfy._s1b + k * dom->Gfy._s2b] = src[i + j * dom->Gfy._s1b + k * dom->Gfy._s2b]; } } __global__ void memcpy_w_star_test(real *dst, real *src, dom_struct *dom) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { dst[i + j * dom->Gfz._s1b + k * dom->Gfz._s2b] = src[i + j * dom->Gfz._s1b + k * dom->Gfz._s2b]; } } __global__ void PP_memcpy_p_test(real *dst, real *src, dom_struct *dom) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gcc._is-DOM_BUF; i < dom->Gcc._ie-DOM_BUF; i++) { dst[(i+DOM_BUF) + (j+DOM_BUF) * dom->Gfx._s1b + (k+DOM_BUF) * dom->Gcc._s2b] = src[i + j * dom->Gcc._s1b + k * dom->Gcc._s2b]; } } extern "C" void cuda_BC_test(void) { int i, j, k; // iterators int C; // cell locations real *p_p_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_p_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_p_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gcc.s3b * sizeof(real); real *u_p_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_p_o = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_p_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_o = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann 
input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_o = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_p_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_p_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_p_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_p_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_p_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_p_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real p_p_err_min = FLT_MAX; real p_p_err_max = FLT_MIN; real p_d_err_min = FLT_MAX; real p_d_err_max = FLT_MIN; real p_n_err_min = FLT_MAX; real p_n_err_max = FLT_MIN; real u_p_err_min = FLT_MAX; real u_p_err_max = FLT_MIN; real u_d_err_min = FLT_MAX; real u_d_err_max = FLT_MIN; real u_n_err_min = FLT_MAX; real u_n_err_max = FLT_MIN; real v_p_err_min = FLT_MAX; real v_p_err_max = FLT_MIN; real v_d_err_min = FLT_MAX; real v_d_err_max = FLT_MIN; real v_n_err_min = FLT_MAX; real v_n_err_max = FLT_MIN; real w_p_err_min = FLT_MAX; real w_p_err_max = FLT_MIN; real w_d_err_min = FLT_MAX; real w_d_err_max = FLT_MIN; real w_n_err_min = FLT_MAX; real w_n_err_max = FLT_MIN; printf("\nBoundary condition application validation:\n"); // periodic field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Periodic boundary conditions:\n"); printf(" p = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" u = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" v = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" w = cos(pi*x) + cos(pi*y) + cos(pi*z)\n\n"); // write input fields for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = 
Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_p_i[C] = cos(PI * (i-0.5)*Dom.dx); p_p_i[C] += cos(PI * (j-0.5)*Dom.dy); p_p_i[C] += cos(PI * (k-0.5)*Dom.dz); p[C] = p_p_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_p_i[C] = cos(PI * (i-1.0)*Dom.dx); u_p_i[C] += cos(PI * (j-0.5)*Dom.dy); u_p_i[C] += cos(PI * (k-0.5)*Dom.dz); u[C] = u_p_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_p_i[C] = cos(PI * (i-0.5)*Dom.dx); v_p_i[C] += cos(PI * (j-1.0)*Dom.dy); v_p_i[C] += cos(PI * (k-0.5)*Dom.dz); v[C] = v_p_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_p_i[C] = cos(PI * (i-0.5)*Dom.dx); w_p_i[C] += cos(PI * (j-0.5)*Dom.dy); w_p_i[C] += cos(PI * (k-1.0)*Dom.dz); w[C] = w_p_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to PERIODIC (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("PERIODIC..."); bc.pW = PERIODIC; bc.pE = PERIODIC; bc.pS = PERIODIC; bc.pN = PERIODIC; bc.pB = PERIODIC; bc.pT = PERIODIC; bc.uW = PERIODIC; bc.uE = PERIODIC; bc.uS = PERIODIC; bc.uN = PERIODIC; bc.uB = PERIODIC; bc.uT = PERIODIC; bc.vW = PERIODIC; bc.vE = PERIODIC; bc.vS = PERIODIC; bc.vN = PERIODIC; bc.vB = PERIODIC; bc.vT = PERIODIC; bc.wW = PERIODIC; bc.wE = PERIODIC; bc.wS = PERIODIC; bc.wN = PERIODIC; bc.wB = PERIODIC; bc.wT = PERIODIC; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_p_o[C] = p[C]; p_p_e[C] = p_p_o[C] - p_p_i[C]; if(fabs(p_p_e[C]) > p_p_err_max) p_p_err_max = fabs(p_p_e[C]); if(fabs(p_p_e[C]) < p_p_err_min) p_p_err_min = fabs(p_p_e[C]); p[C] = p_p_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_p_o[C] = u[C]; u_p_e[C] = u_p_o[C] - u_p_i[C]; if(fabs(u_p_e[C]) > u_p_err_max) u_p_err_max = fabs(u_p_e[C]); if(fabs(u_p_e[C]) < u_p_err_min) u_p_err_min = fabs(u_p_e[C]); u[C] = u_p_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_p_o[C] = v[C]; v_p_e[C] = v_p_o[C] - v_p_i[C]; if(fabs(v_p_e[C]) > v_p_err_max) v_p_err_max = fabs(v_p_e[C]); if(fabs(v_p_e[C]) < v_p_err_min) v_p_err_min = fabs(v_p_e[C]); v[C] = v_p_e[C]; } } } 
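// w-component of the periodic test: copy the result left by cuda_dom_BC(), form the
// pointwise difference from the input field, track the min/max absolute error, and store
// the error back in w for output (same procedure as the p, u, and v loops above, over the
// Gfz grid including ghost cells)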
for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_p_o[C] = w[C]; w_p_e[C] = w_p_o[C] - w_p_i[C]; if(fabs(w_p_e[C]) > w_p_err_max) w_p_err_max = fabs(w_p_e[C]); if(fabs(w_p_e[C]) < w_p_err_min) w_p_err_min = fabs(w_p_e[C]); w[C] = w_p_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_p_err_min, p_p_err_max); printf(" u %12.3e %12.3e\n", u_p_err_min, u_p_err_max); printf(" v %12.3e %12.3e\n", v_p_err_min, v_p_err_max); printf(" w %12.3e %12.3e\n", w_p_err_min, w_p_err_max); // Dirichlet field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Dirichlet boundary conditions:\n"); printf(" p = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" u = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" v = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" w = sin(pi*x) * sin(pi*y) * sin(pi*z)\n\n"); // write input field for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_d_i[C] = sin(PI * (i-0.5)*Dom.dx); p_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); p_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); p[C] = p_d_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_d_i[C] = sin(PI * (i-1.0)*Dom.dx); u_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); u_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); u[C] = u_d_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_d_i[C] = sin(PI * (i-0.5)*Dom.dx); v_d_i[C] *= sin(PI * (j-1.0)*Dom.dy); v_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); v[C] = v_d_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_d_i[C] = sin(PI * (i-0.5)*Dom.dx); w_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); w_d_i[C] *= sin(PI * (k-1.0)*Dom.dz); w[C] = w_d_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to DIRICHLET (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("DIRICHLET..."); bc.pW = DIRICHLET; bc.pWD = 0.; bc.pE = DIRICHLET; bc.pED = 0.; bc.pS = DIRICHLET; bc.pSD = 0.; bc.pN = DIRICHLET; bc.pND = 0.; bc.pB = DIRICHLET; bc.pBD = 0.; bc.pT = DIRICHLET; bc.pTD = 0.; bc.uW = DIRICHLET; bc.uWD = 0.; bc.uE = DIRICHLET; bc.uED = 0.; bc.uS = DIRICHLET; bc.uSD = 0.; bc.uN = DIRICHLET; bc.uND = 0.; bc.uB = DIRICHLET; bc.uBD = 0.; bc.uT = DIRICHLET; bc.uTD = 0.; bc.vW = DIRICHLET; bc.vWD = 0.; bc.vE = DIRICHLET; bc.vED = 0.; bc.vS = DIRICHLET; bc.vSD = 0.; bc.vN = DIRICHLET; bc.vND = 0.; bc.vB = DIRICHLET; bc.vBD = 0.; bc.vT = DIRICHLET; bc.vTD = 0.; bc.wW = DIRICHLET; bc.wWD = 0.; bc.wE = DIRICHLET; bc.wED = 0.; bc.wS = DIRICHLET; bc.wSD = 0.; bc.wN = DIRICHLET; 
bc.wND = 0.; bc.wB = DIRICHLET; bc.wBD = 0.; bc.wT = DIRICHLET; bc.wTD = 0.; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_d_o[C] = p[C]; p_d_e[C] = p_d_o[C] - p_d_i[C]; if(fabs(p_d_e[C]) > p_d_err_max) p_d_err_max = fabs(p_d_e[C]); if(fabs(p_d_e[C]) < p_d_err_min) p_d_err_min = fabs(p_d_e[C]); p[C] = p_d_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_d_o[C] = u[C]; u_d_e[C] = u_d_o[C] - u_d_i[C]; if(fabs(u_d_e[C]) > u_d_err_max) u_d_err_max = fabs(u_d_e[C]); if(fabs(u_d_e[C]) < u_d_err_min) u_d_err_min = fabs(u_d_e[C]); u[C] = u_d_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_d_o[C] = v[C]; v_d_e[C] = v_d_o[C] - v_d_i[C]; if(fabs(v_d_e[C]) > v_d_err_max) v_d_err_max = fabs(v_d_e[C]); if(fabs(v_d_e[C]) < v_d_err_min) v_d_err_min = fabs(v_d_e[C]); v[C] = v_d_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_d_o[C] = w[C]; w_d_e[C] = w_d_o[C] - w_d_i[C]; if(fabs(w_d_e[C]) > w_d_err_max) w_d_err_max = fabs(w_d_e[C]); if(fabs(w_d_e[C]) < w_d_err_min) w_d_err_min = fabs(w_d_e[C]); w[C] = w_d_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_d_err_min, p_d_err_max); printf(" u %12.3e %12.3e\n", u_d_err_min, u_d_err_max); printf(" v %12.3e %12.3e\n", v_d_err_min, v_d_err_max); printf(" w %12.3e %12.3e\n", w_d_err_min, w_d_err_max); // Neumann field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Neumann boundary conditions:\n"); printf(" p = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" u = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" v = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" w = cos(pi*x) + cos(pi*y) + cos(pi*z)\n\n"); // write input field for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_n_i[C] = cos(PI * (i-0.5)*Dom.dx); p_n_i[C] += cos(PI * (j-0.5)*Dom.dy); p_n_i[C] += cos(PI * (k-0.5)*Dom.dz); p[C] = p_n_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_n_i[C] = cos(PI * (i-1.0)*Dom.dx); u_n_i[C] += cos(PI * (j-0.5)*Dom.dy); u_n_i[C] += cos(PI * (k-0.5)*Dom.dz); u[C] = u_n_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = 
i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_n_i[C] = cos(PI * (i-0.5)*Dom.dx); v_n_i[C] += cos(PI * (j-1.0)*Dom.dy); v_n_i[C] += cos(PI * (k-0.5)*Dom.dz); v[C] = v_n_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_n_i[C] = cos(PI * (i-0.5)*Dom.dx); w_n_i[C] += cos(PI * (j-0.5)*Dom.dy); w_n_i[C] += cos(PI * (k-1.0)*Dom.dz); w[C] = w_n_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to NEUMANN (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("NEUMANN..."); bc.pW = NEUMANN; bc.pE = NEUMANN; bc.pS = NEUMANN; bc.pN = NEUMANN; bc.pB = NEUMANN; bc.pT = NEUMANN; bc.uW = NEUMANN; bc.uE = NEUMANN; bc.uS = NEUMANN; bc.uN = NEUMANN; bc.uB = NEUMANN; bc.uT = NEUMANN; bc.vW = NEUMANN; bc.vE = NEUMANN; bc.vS = NEUMANN; bc.vN = NEUMANN; bc.vB = NEUMANN; bc.vT = NEUMANN; bc.wW = NEUMANN; bc.wE = NEUMANN; bc.wS = NEUMANN; bc.wN = NEUMANN; bc.wB = NEUMANN; bc.wT = NEUMANN; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_n_o[C] = p[C]; p_n_e[C] = p_n_o[C] - p_n_i[C]; if(fabs(p_n_e[C]) > p_n_err_max) p_n_err_max = fabs(p_n_e[C]); if(fabs(p_n_e[C]) < p_n_err_min) p_n_err_min = fabs(p_n_e[C]); p[C] = p_n_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_n_o[C] = u[C]; u_n_e[C] = u_n_o[C] - u_n_i[C]; if(fabs(u_n_e[C]) > u_n_err_max) u_n_err_max = fabs(u_n_e[C]); if(fabs(u_n_e[C]) < u_n_err_min) u_n_err_min = fabs(u_n_e[C]); u[C] = u_n_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_n_o[C] = v[C]; v_n_e[C] = v_n_o[C] - v_n_i[C]; if(fabs(v_n_e[C]) > v_n_err_max) v_n_err_max = fabs(v_n_e[C]); if(fabs(v_n_e[C]) < v_n_err_min) v_n_err_min = fabs(v_n_e[C]); v[C] = v_n_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_n_o[C] = w[C]; w_n_e[C] = w_n_o[C] - w_n_i[C]; if(fabs(w_n_e[C]) > w_n_err_max) w_n_err_max = fabs(w_n_e[C]); if(fabs(w_n_e[C]) < w_n_err_min) w_n_err_min = fabs(w_n_e[C]); w[C] = w_n_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_n_err_min, p_n_err_max); printf(" u %12.3e %12.3e\n", 
u_n_err_min, u_n_err_max); printf(" v %12.3e %12.3e\n", v_n_err_min, v_n_err_max); printf(" w %12.3e %12.3e\n", w_n_err_min, w_n_err_max); // clean up free(p_p_i); free(p_p_o); free(p_p_e); free(p_d_i); free(p_d_o); free(p_d_e); free(p_n_i); free(p_n_o); free(p_n_e); free(u_p_i); free(u_p_o); free(u_p_e); free(u_d_i); free(u_d_o); free(u_d_e); free(u_n_i); free(u_n_o); free(u_n_e); free(v_p_i); free(v_p_o); free(v_p_e); free(v_d_i); free(v_d_o); free(v_d_e); free(v_n_i); free(v_n_o); free(v_n_e); free(w_p_i); free(w_p_o); free(w_p_e); free(w_d_i); free(w_d_o); free(w_d_e); free(w_n_i); free(w_n_o); free(w_n_e); } void cuda_project_test(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gyz.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nVelocity projection calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z), "); printf("p = exp(x) + exp(y) + exp(z)\n\n"); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_a[C] = nu; u_a[C] -= 1. / rho_f; u_a[C] -= 2 * exp((i-1.5)*Dom.dx); u_a[C] -= exp((j-1.0)*Dom.dy); u_a[C] -= exp((k-1.0)*Dom.dz); u_a[C] *= dt * exp((i-1.5)*Dom.dx); u_a[C] += exp((i-1.5)*Dom.dx); u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_a[C] = nu; v_a[C] -= 1. / rho_f; v_a[C] -= 2 * exp((j-1.5)*Dom.dy); v_a[C] -= exp((k-1.0)*Dom.dz); v_a[C] -= exp((i-1.0)*Dom.dx); v_a[C] *= dt * exp((j-1.5)*Dom.dy); v_a[C] += exp((j-1.5)*Dom.dy); v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu; w_a[C] -= 1. 
/ rho_f; w_a[C] -= 2 * exp((k-1.5)*Dom.dz); w_a[C] -= exp((i-1.0)*Dom.dx); w_a[C] -= exp((j-1.0)*Dom.dy); w_a[C] *= dt * exp((k-1.5)*Dom.dz); w_a[C] += exp((k-1.5)*Dom.dz); w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input pressure and velocity fields for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p[C] = exp((i-1.0)*Dom.dx) + exp((j-1.0)*Dom.dy) + exp((k-1.0)*Dom.dz); } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u[C] = exp((i-1.5)*Dom.dx); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v[C] = exp((j-1.5)*Dom.dy); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w[C] = exp((k-1.5)*Dom.dz); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); cuda_project(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; u_e[C] = (u_c[C] - u_a[C]) / u_a[C]; if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; v_e[C] = (v_c[C] - v_a[C]) / v_a[C]; if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; w_e[C] = (w_c[C] - w_a[C]) / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); 
free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); }
extern "C" void cuda_quad_interp_test(void) { int i, j, k; // iterators
int C;
real *p_a = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // cpumem += Dom.Gcc.s3b * sizeof(real);
real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // cpumem += Dom.Gfx.s3b * sizeof(real);
real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // cpumem += Dom.Gfy.s3b * sizeof(real);
real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // cpumem += Dom.Gfz.s3b * sizeof(real);
real x, y, z;
// min and max error search
real *p_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *p_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *u_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *u_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *v_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *v_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *w_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
real *w_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real);
(hipSetDevice(dev_start));
printf("\nLebedev quadrature interpolation validation:\n\n");
printf(" p = u = v = w = exp(x) + exp(y) + exp(z)\n\n");
// create analytic result and push to device (i-loops run over isb..ieb, matching the other field loops)
for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; p_a[C] = exp(x) + exp(y) + exp(z); p[C] = p_a[C]; } } }
for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; u_a[C] = exp(x) + exp(y) + exp(z); u[C] = u_a[C]; } } }
for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-1.0)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; v_a[C] = exp(x) + exp(y) + exp(z); v[C] = v_a[C]; } } }
for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-1.0)*Dom.dz + Dom.zs; w_a[C] = exp(x) + exp(y) + exp(z); w[C] = w_a[C]; } } }
// write expected solution
rec_paraview_stepnum_out++;
printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n");
// push fields to device
printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n");
// call code to test
printf(" Running cuda_quad_interp()...");
// set up quadrature nodes for 7th-order Lebedev quadrature
/*real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; real alph2 = 2.186276035465284; // nodes real a1_t[6] = {PI12, PI12, PI12, PI12, 0., 0.}; real a1_p[6] = {0., PI12, PI, PI32, 0., PI}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34,
PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; int nnodes = 26; // put all quadrature nodes together for interpolation real node_t[26]; real node_p[26]; for(i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for(i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for(i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } */ real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; //54.736 real alph2 = 2.186276035465284; //125.264 /*real alph3 = 0.440510663004698; //25.239 real alph4 = 2.701081990585095; //154.761 real alph5 = 1.264518957625227; //72.452 real alph6 = 1.877073695964566; //107.548 real alph7 = 1.249045772398254; //71.565 real alph8 = 1.892546881191539; //108.435 real alph9 = 0.321750554396642; //18.435 real alph10 = 2.819842099193151; //161.565 */ // nodes TODO: find a more elegant way of fixing the divide by sin(0) real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST}; real a1_p[6] = {0., PI12, PI, PI32, 0., 0.}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34, PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; /*real b_t[24] = {alph3, alph4, alph3, alph4, alph3, alph4, alph3, alph4, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6}; real b_p[24] = {PI14, PI74, PI14, PI74, PI34, PI54, PI34, PI54, alph7, -alph7, alph7, -alph7, alph8, -alph8, alph8, -alph8, alph9, alph10, alph9, alph10, -alph9, -alph10, -alph9, -alph10}; */ int nnodes = 26; // put all quadrature nodes together for interpolation real node_t[nnodes]; real node_p[nnodes]; for(i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for(i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for(i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } /*for(i = 0; i < 24; i++) { node_t[26+i] = b_t[i]; node_p[26+i] = b_p[i]; } */ // create a place to temporarily store field variables at quadrature nodes real *_node_t; real *_node_p; (hipMalloc((void**) &_node_t, nnodes * sizeof(real))); gpumem += nnodes * sizeof(real); (hipMalloc((void**) &_node_p, nnodes * sizeof(real))); gpumem += nnodes * sizeof(real); (hipMemcpy(_node_t, node_t, nnodes * sizeof(real), hipMemcpyHostToDevice)); (hipMemcpy(_node_p, node_p, nnodes * sizeof(real), hipMemcpyHostToDevice)); real *_pp; real *_ur; real *_ut; real *_up; real *pp = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *ur = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *ut = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *up = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); (hipMalloc((void**) &_pp, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); (hipMalloc((void**) &_ur, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); (hipMalloc((void**) &_ut, nnodes * nparts * sizeof(real))); 
gpumem += nnodes * nparts * sizeof(real); (hipMalloc((void**) &_up, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); cuda_quad_interp(dev_start, _node_t, _node_p, nnodes, _pp, _ur, _ut, _up); printf("done.\n"); // pull fields back to host printf(" Pulling fields back to host..."); (hipMemcpy(pp, _pp, nnodes * nparts * sizeof(real), hipMemcpyDeviceToHost)); (hipMemcpy(ur, _ur, nnodes * nparts * sizeof(real), hipMemcpyDeviceToHost)); (hipMemcpy(ut, _ut, nnodes * nparts * sizeof(real), hipMemcpyDeviceToHost)); (hipMemcpy(up, _up, nnodes * nparts * sizeof(real), hipMemcpyDeviceToHost)); printf("done.\n"); for(i = 0; i < nnodes; i++) { printf("xx[%d] = %f yy[%d] = %f zz[%d] = %f\n", i, ur[i], i, ut[i], i, up[i]); } // write computed solution printf("\n Writing summarized solution to: out_%d.interp...", rec_paraview_stepnum_out); char path[FILE_NAME_SIZE] = ""; sprintf(path, "%s/output/out_%d.interp", ROOT_DIR, rec_paraview_stepnum_out); FILE *file = fopen(path, "w"); if(file == NULL) { fprintf(stderr, "Could not open file out_%d.interp", rec_paraview_stepnum_out); exit(EXIT_FAILURE); } for(int part = 0; part < nparts; part++) { p_err_min[part] = FLT_MAX; p_err_max[part] = FLT_MIN; u_err_min[part] = FLT_MAX; u_err_max[part] = FLT_MIN; v_err_min[part] = FLT_MAX; v_err_max[part] = FLT_MIN; w_err_min[part] = FLT_MAX; w_err_max[part] = FLT_MIN; fprintf(file, "parts[%d].rs = %f\n", part, parts[part].rs); fprintf(file, "%11s%11s%11s%11s%11s%11s%11s\n", "theta", "phi", "expected", "p_err", "u_err", "v_err", "w_err"); for(int n = 0; n < nnodes; n++) { real x_tmp = parts[part].rs*sin(node_t[n])*cos(node_p[n]) + parts[part].x; real y_tmp = parts[part].rs*sin(node_t[n])*sin(node_p[n]) + parts[part].y; real z_tmp = parts[part].rs*cos(node_t[n]) + parts[part].z; real pa_tmp = exp(x_tmp) + exp(y_tmp) + exp(z_tmp); real u_tmp = ur[n+part*nnodes]*sin(node_t[n])*cos(node_p[n]); u_tmp += ut[n+part*nnodes]*cos(node_t[n])*cos(node_p[n]); u_tmp -= up[n+part*nnodes]*sin(node_p[n]); real v_tmp = ur[n+part*nnodes]*sin(node_t[n])*sin(node_p[n]); v_tmp += ut[n+part*nnodes]*cos(node_t[n])*sin(node_p[n]); v_tmp += up[n+part*nnodes]*cos(node_p[n]); real w_tmp = ur[n+part*nnodes]*cos(node_t[n]); w_tmp -= ut[n+part*nnodes]*sin(node_t[n]); real p_out = (pa_tmp-pp[n+part*nnodes]) / pa_tmp; real u_out = (pa_tmp-u_tmp) / pa_tmp; real v_out = (pa_tmp-v_tmp) / pa_tmp; real w_out = (pa_tmp-w_tmp) / pa_tmp; if(fabs(p_out) < p_err_min[part]) p_err_min[part] = fabs(p_out); if(fabs(p_out) > p_err_max[part]) p_err_max[part] = fabs(p_out); if(fabs(u_out) < u_err_min[part]) u_err_min[part] = fabs(u_out); if(fabs(u_out) > u_err_max[part]) u_err_max[part] = fabs(u_out); if(fabs(v_out) < v_err_min[part]) v_err_min[part] = fabs(v_out); if(fabs(v_out) > v_err_max[part]) v_err_max[part] = fabs(v_out); if(fabs(w_out) < w_err_min[part]) w_err_min[part] = fabs(w_out); if(fabs(w_out) > w_err_max[part]) w_err_max[part] = fabs(w_out); fprintf(file, "%11.7f%11.7f%11.7f%11.3e%11.3e%11.3e%11.3e\n", node_t[n], node_p[n], pa_tmp, p_out, u_out, v_out, w_out); } } fclose(file); printf("done.\n"); printf("\n Error summary:\n"); for(int a = 0; a < nparts; a++) { printf(" Particle %d\n", a); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_err_min[a], p_err_max[a]); printf(" u %12.3e %12.3e\n", u_err_min[a], u_err_max[a]); printf(" v %12.3e %12.3e\n", v_err_min[a], v_err_max[a]); printf(" w %12.3e %12.3e\n\n", w_err_min[a], w_err_max[a]); } free(p_a); free(u_a); free(v_a); free(w_a); 
free(p_err_min); free(p_err_max); free(u_err_min); free(u_err_max); free(v_err_min); free(v_err_max); free(w_err_min); free(w_err_max); free(pp); free(ur); free(ut); free(up); hipFree(_node_t); hipFree(_node_p); hipFree(_pp); hipFree(_ur); hipFree(_ut); hipFree(_up); } extern "C" void cuda_lamb_test(void) { int i, j, k; // iterators int C; // cell locations real x, y, z; real r, theta, phi; real a = parts[0].r; real *p_a = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_c = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gcc.s3b * sizeof(real) real *p_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // error difference // cpumem += Dom.Gcc.s3b * sizeof(real) real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real) real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real) real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real) real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real) real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real) real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real) real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real) real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real) real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real) // min and max error search real p_err_min = FLT_MAX; real p_err_max = FLT_MIN; real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nLamb's coefficient calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z), "); printf("p = exp(x) + exp(y) + exp(z)\n\n"); real U = 0.; real V = 0.; real W = 0.01; // set up expected solution for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); p_a[C] = -1.5*(U*st*cp + V*st*sp + W*ct)*a/r/r; p[C] = p_a[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) 
phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); u_a[C] = -0.75*a/r*(U + (U*st*cp + V*st*sp + W*ct)*st*cp) - 0.25*a*a*a/r/r/r*(U - 3.*(U*st*cp + V*st*sp + W*ct)*st*cp) + U; u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-1.0)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); v_a[C] = -0.75*a/r*(V + (U*st*cp + V*st*sp + W*ct)*st*sp) - 0.25*a*a*a/r/r/r*(V - 3.*(U*st*cp + V*st*sp + W*ct)*st*sp) + V; v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-1.0)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); w_a[C] = -0.75*a/r*(W + (U*st*cp + V*st*sp + W*ct)*ct) - 0.25*a*a*a/r/r/r*(W - 3.*(U*st*cp + V*st*sp + W*ct)*ct) + W; w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // set up expected solution for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); p_a[C] = -1.5*(U*st*cp + V*st*sp + W*ct)*a/r/r; p[C] = p_a[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); u_a[C] = -0.75*a/r*(U + (U*st*cp + V*st*sp + W*ct)*st*cp) - 0.25*a*a*a/r/r/r*(U - 3.*(U*st*cp + V*st*sp + W*ct)*st*cp) + U; u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-1.0)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) 
phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); v_a[C] = -0.75*a/r*(V + (U*st*cp + V*st*sp + W*ct)*st*sp) - 0.25*a*a*a/r/r/r*(V - 3.*(U*st*cp + V*st*sp + W*ct)*st*sp) + V; v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-1.0)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); w_a[C] = -0.75*a/r*(W + (U*st*cp + V*st*sp + W*ct)*ct) - 0.25*a*a*a/r/r/r*(W - 3.*(U*st*cp + V*st*sp + W*ct)*ct) + W; w[C] = w_a[C]; } } } // write initial fields (same as expected solution) rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test printf(" Running cuda_part_BC()..."); cuda_Lamb(); cuda_part_BC(); cuda_part_pull(); char nam[FILE_NAME_SIZE] = "lamb.rec"; recorder_lamb(nam,0); printf("done.\n"); // pull fields back to host printf(" Pulling fields back to host..."); //cuda_div_U(); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_c[C] = p[C]; if(p_c[C] != 0) p_e[C] = (p_c[C] - p_a[C]) / p_c[C]; else p_e[C] = (p_c[C] - p_a[C]); if(fabs(p_e[C]) > p_err_max) p_err_max = fabs(p_e[C]); if(fabs(p_e[C]) < p_err_min) p_err_min = fabs(p_e[C]); p[C] = p_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; /*if(u_c[C] != 0) u_e[C] = (u_c[C] - u_a[C]) / u_c[C]; else */ u_e[C] = (u_c[C] - u_a[C]); /*if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); */ u[C] = u_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; /*if(v_c[C] != 0) v_e[C] = (v_c[C] - v_a[C]) / v_c[C]; else */ v_e[C] = (v_c[C] - v_a[C]); /*if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); */ v[C] = v_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; /*if(w_c[C] != 0) w_e[C] = (w_c[C] - w_a[C]) / w_c[C]; else */ w_e[C] = (w_c[C] - w_a[C]); /*if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); */ w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field 
variable: minimum error: maximum error:\n");
/* note: only the p extrema are updated in the error loops above; the corresponding
   updates for u, v, and w are commented out, so u_err_min/u_err_max, v_err_min/v_err_max,
   and w_err_min/w_err_max still hold their FLT_MAX / FLT_MIN initial values here */
printf(" p %12.3e %12.3e\n", p_err_min, p_err_max);
printf(" u %12.3e %12.3e\n", u_err_min, u_err_max);
printf(" v %12.3e %12.3e\n", v_err_min, v_err_max);
printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max);
// clean up
free(p_a); free(p_c); free(p_e);
free(u_a); free(u_c); free(u_e);
free(v_a); free(v_c); free(v_e);
free(w_a); free(w_c); free(w_e);
}
5b33cc6a49bb2a1771c52f29f08af5d10ce285f7.cu
/******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ extern "C" { #include "bluebottle.h" } #include <cuda.h> #include "cuda_quadrature.h" extern "C" void cuda_U_star_test_exp(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nIntermediate velocity calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z)\n\n"); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_a[C] = nu; u_a[C] -= 2 * exp((i-1.5)*Dom.dx); u_a[C] -= exp((j-1.0)*Dom.dy); u_a[C] -= exp((k-1.0)*Dom.dz); u_a[C] *= dt * exp((i-1.5)*Dom.dx); u_a[C] += exp((i-1.5)*Dom.dx); u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_a[C] = nu; v_a[C] -= 2 * exp((j-1.5)*Dom.dy); v_a[C] -= exp((k-1.0)*Dom.dz); v_a[C] -= exp((i-1.0)*Dom.dx); v_a[C] *= dt * exp((j-1.5)*Dom.dy); v_a[C] += exp((j-1.5)*Dom.dy); v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu; 
w_a[C] -= 2 * exp((k-1.5)*Dom.dz); w_a[C] -= exp((i-1.0)*Dom.dx); w_a[C] -= exp((j-1.0)*Dom.dy); w_a[C] *= dt * exp((k-1.5)*Dom.dz); w_a[C] += exp((k-1.5)*Dom.dz); w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input velocity fields for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u[C] = exp((i-1.5)*Dom.dx); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v[C] = exp((j-1.5)*Dom.dy); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w[C] = exp((k-1.5)*Dom.dz); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; u_e[C] = (u_c[C] - u_a[C]) / u_a[C]; if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; v_e[C] = (v_c[C] - v_a[C]) / v_a[C]; if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; w_e[C] = (w_c[C] - w_a[C]) / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); } extern "C" void cuda_U_star_test_cos(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) 
malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("Intermediate velocity calculation validation:\n\n"); printf(" u = cos(y), v = cos(z), w = cos(x)\n\n"); dt = 1; //dt0 = dt; printf("dt = %f, dt0 = %f\n", dt, dt0); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; /* u_a[C] = nu * sin((j-1.0)*Dom.dy); u_a[C] += cos((j-1.0)*Dom.dy) * sin((k-1.0)*Dom.dz); u_a[C] *= -dt; u_a[C] += sin((j-1.0)*Dom.dy); */ real x = 2.*PI*(i-1.0)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //u_a[C] = -4.*PI*sin(x)*cos(x)*sin(y)*sin(y); //u_a[C] += 2.*PI*sin(x)*cos(x)*(sin(y)*sin(y)-cos(y)*cos(y)); u_a[C] = 8.*PI*PI*nu*cos(x)*sin(y); //u_a[C] += PI*sin(2.*x); u_a[C] *= -dt; //u_a[C] = cos(x)*sin(y) * exp(-16.*PI*PI*1.0*dt); u[C] = u_a[C]; conv0_u[C] = -4.*PI*sin(x)*cos(x)*sin(y)*sin(y) + 2.*PI*sin(x)*cos(x) *(sin(y)*sin(y)-cos(y)*cos(y)); conv0_u[C] *= exp(16.*PI*PI*1.0*dt); #ifdef EXPLICIT diff0_u[C] = -8.*PI*PI*nu*cos(x)*sin(y); diff0_u[C] *= exp(16.*PI*PI*1.0*dt); #endif } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; /* v_a[C] = nu * sin((k-1.0)*Dom.dz); v_a[C] += cos((k-1.0)*Dom.dz) * sin((i-1.0)*Dom.dx); v_a[C] *= -dt; v_a[C] += sin((k-1.0)*Dom.dz); */ real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-1.0)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //v_a[C] = 2.*PI*cos(y)*sin(y)*(sin(x)*sin(x)-cos(x)*cos(x)); //v_a[C] += -4.*PI*sin(x)*sin(x)*cos(y)*sin(y); v_a[C] = -8.*PI*PI*nu*sin(x)*cos(y); // v_a[C] += PI*sin(2.*y); v_a[C] *= -dt; //v_a[C] = -sin(x)*cos(y) * exp(-16.*PI*PI*1.0*dt); v[C] = v_a[C]; conv0_v[C] = -4.*PI*sin(x)*sin(x)*sin(y)*cos(y) + 2.*PI*sin(y)*cos(y) *(sin(x)*sin(x)-cos(x)*cos(x)); conv0_v[C] *= exp(16.*PI*PI*1.0*dt); #ifdef EXPLICIT diff0_v[C] = 8.*PI*PI*nu*sin(x)*cos(y); diff0_v[C] *= exp(16.*PI*PI*1.0*dt); #endif } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu * sin((i-1.0)*Dom.dx); w_a[C] += cos((i-1.0)*Dom.dx) * sin((j-1.0)*Dom.dy); w_a[C] *= -dt; w_a[C] += sin((i-1.0)*Dom.dx); w_a[C] = 0; w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution 
to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input pressure and velocity fields for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; real x = 2.*PI*(i-1.0)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //u[C] = sin((j-1.0)*Dom.dy); u[C] = cos(x)*sin(y); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-1.0)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; //v[C] = sin((k-1.0)*Dom.dz); v[C] = -sin(x)*cos(y); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; //w[C] = sin((i-1.0)*Dom.dx); w[C] = 0; } } } for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; real x = 2.*PI*(i-0.5)*Dom.dx; real y = 2.*PI*(j-0.5)*Dom.dy; //real z = 2.*PI*(k-0.5)*Dom.dz; p[C] = -0.25*rho_f*(cos(2.*x)+cos(2.*y)); p0[C] = -0.25*rho_f*(cos(2.*x)+cos(2.*y)); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error u_err_max = 0; for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u_star[C]; u_e[C] = (u_c[C] - u_a[C]);// / u_a[C]; u_err_max += fabs(u_e[C]); //if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } v_err_max = 0; for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v_star[C]; v_e[C] = (v_c[C] - v_a[C]);// / v_a[C]; v_err_max += fabs(v_e[C]); //if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w_star[C]; w_e[C] = (w_c[C] - w_a[C]);// / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, 
u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); } __global__ void memcpy_u_star_test(real *dst, real *src, dom_struct *dom) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { dst[i + j * dom->Gfx._s1b + k * dom->Gfx._s2b] = src[i + j * dom->Gfx._s1b + k * dom->Gfx._s2b]; } } __global__ void memcpy_v_star_test(real *dst, real *src, dom_struct *dom) { int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { dst[i + j * dom->Gfy._s1b + k * dom->Gfy._s2b] = src[i + j * dom->Gfy._s1b + k * dom->Gfy._s2b]; } } __global__ void memcpy_w_star_test(real *dst, real *src, dom_struct *dom) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { dst[i + j * dom->Gfz._s1b + k * dom->Gfz._s2b] = src[i + j * dom->Gfz._s1b + k * dom->Gfz._s2b]; } } __global__ void PP_memcpy_p_test(real *dst, real *src, dom_struct *dom) { int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gcc._is-DOM_BUF; i < dom->Gcc._ie-DOM_BUF; i++) { dst[(i+DOM_BUF) + (j+DOM_BUF) * dom->Gfx._s1b + (k+DOM_BUF) * dom->Gcc._s2b] = src[i + j * dom->Gcc._s1b + k * dom->Gcc._s2b]; } } extern "C" void cuda_BC_test(void) { int i, j, k; // iterators int C; // cell locations real *p_p_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_p_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_p_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_d_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_i = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_o = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_n_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gcc.s3b * sizeof(real); real *u_p_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_p_o = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_p_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_o = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_d_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_i = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_o = (real*) 
malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_n_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_p_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_p_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_p_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_d_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_i = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_o = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_n_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_p_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_p_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_p_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // periodic error // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_d_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Dirichlet error // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_i = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann input // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_o = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann output // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_n_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // Neumann error // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real p_p_err_min = FLT_MAX; real p_p_err_max = FLT_MIN; real p_d_err_min = FLT_MAX; real p_d_err_max = FLT_MIN; real p_n_err_min = FLT_MAX; real p_n_err_max = FLT_MIN; real u_p_err_min = FLT_MAX; real u_p_err_max = FLT_MIN; real u_d_err_min = FLT_MAX; real u_d_err_max = FLT_MIN; real u_n_err_min = FLT_MAX; real u_n_err_max = FLT_MIN; real v_p_err_min = FLT_MAX; real v_p_err_max = FLT_MIN; real v_d_err_min = FLT_MAX; real v_d_err_max = FLT_MIN; real v_n_err_min = FLT_MAX; real v_n_err_max = FLT_MIN; real w_p_err_min = FLT_MAX; real w_p_err_max = FLT_MIN; real w_d_err_min = FLT_MAX; real w_d_err_max = FLT_MIN; real w_n_err_min = FLT_MAX; real w_n_err_max = FLT_MIN; printf("\nBoundary condition application validation:\n"); // periodic field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Periodic boundary conditions:\n"); printf(" p = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" u = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" v = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" w = cos(pi*x) + cos(pi*y) + cos(pi*z)\n\n"); // write input fields for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; 
i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_p_i[C] = cos(PI * (i-0.5)*Dom.dx); p_p_i[C] += cos(PI * (j-0.5)*Dom.dy); p_p_i[C] += cos(PI * (k-0.5)*Dom.dz); p[C] = p_p_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_p_i[C] = cos(PI * (i-1.0)*Dom.dx); u_p_i[C] += cos(PI * (j-0.5)*Dom.dy); u_p_i[C] += cos(PI * (k-0.5)*Dom.dz); u[C] = u_p_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_p_i[C] = cos(PI * (i-0.5)*Dom.dx); v_p_i[C] += cos(PI * (j-1.0)*Dom.dy); v_p_i[C] += cos(PI * (k-0.5)*Dom.dz); v[C] = v_p_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_p_i[C] = cos(PI * (i-0.5)*Dom.dx); w_p_i[C] += cos(PI * (j-0.5)*Dom.dy); w_p_i[C] += cos(PI * (k-1.0)*Dom.dz); w[C] = w_p_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to PERIODIC (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("PERIODIC..."); bc.pW = PERIODIC; bc.pE = PERIODIC; bc.pS = PERIODIC; bc.pN = PERIODIC; bc.pB = PERIODIC; bc.pT = PERIODIC; bc.uW = PERIODIC; bc.uE = PERIODIC; bc.uS = PERIODIC; bc.uN = PERIODIC; bc.uB = PERIODIC; bc.uT = PERIODIC; bc.vW = PERIODIC; bc.vE = PERIODIC; bc.vS = PERIODIC; bc.vN = PERIODIC; bc.vB = PERIODIC; bc.vT = PERIODIC; bc.wW = PERIODIC; bc.wE = PERIODIC; bc.wS = PERIODIC; bc.wN = PERIODIC; bc.wB = PERIODIC; bc.wT = PERIODIC; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_p_o[C] = p[C]; p_p_e[C] = p_p_o[C] - p_p_i[C]; if(fabs(p_p_e[C]) > p_p_err_max) p_p_err_max = fabs(p_p_e[C]); if(fabs(p_p_e[C]) < p_p_err_min) p_p_err_min = fabs(p_p_e[C]); p[C] = p_p_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_p_o[C] = u[C]; u_p_e[C] = u_p_o[C] - u_p_i[C]; if(fabs(u_p_e[C]) > u_p_err_max) u_p_err_max = fabs(u_p_e[C]); if(fabs(u_p_e[C]) < u_p_err_min) u_p_err_min = fabs(u_p_e[C]); u[C] = u_p_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_p_o[C] = v[C]; v_p_e[C] = v_p_o[C] - v_p_i[C]; if(fabs(v_p_e[C]) > v_p_err_max) v_p_err_max = fabs(v_p_e[C]); if(fabs(v_p_e[C]) < v_p_err_min) v_p_err_min = fabs(v_p_e[C]); v[C] = v_p_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < 
Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_p_o[C] = w[C]; w_p_e[C] = w_p_o[C] - w_p_i[C]; if(fabs(w_p_e[C]) > w_p_err_max) w_p_err_max = fabs(w_p_e[C]); if(fabs(w_p_e[C]) < w_p_err_min) w_p_err_min = fabs(w_p_e[C]); w[C] = w_p_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_p_err_min, p_p_err_max); printf(" u %12.3e %12.3e\n", u_p_err_min, u_p_err_max); printf(" v %12.3e %12.3e\n", v_p_err_min, v_p_err_max); printf(" w %12.3e %12.3e\n", w_p_err_min, w_p_err_max); // Dirichlet field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Dirichlet boundary conditions:\n"); printf(" p = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" u = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" v = sin(pi*x) * sin(pi*y) * sin(pi*z)\n"); printf(" w = sin(pi*x) * sin(pi*y) * sin(pi*z)\n\n"); // write input field for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_d_i[C] = sin(PI * (i-0.5)*Dom.dx); p_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); p_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); p[C] = p_d_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_d_i[C] = sin(PI * (i-1.0)*Dom.dx); u_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); u_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); u[C] = u_d_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_d_i[C] = sin(PI * (i-0.5)*Dom.dx); v_d_i[C] *= sin(PI * (j-1.0)*Dom.dy); v_d_i[C] *= sin(PI * (k-0.5)*Dom.dz); v[C] = v_d_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_d_i[C] = sin(PI * (i-0.5)*Dom.dx); w_d_i[C] *= sin(PI * (j-0.5)*Dom.dy); w_d_i[C] *= sin(PI * (k-1.0)*Dom.dz); w[C] = w_d_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to DIRICHLET (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("DIRICHLET..."); bc.pW = DIRICHLET; bc.pWD = 0.; bc.pE = DIRICHLET; bc.pED = 0.; bc.pS = DIRICHLET; bc.pSD = 0.; bc.pN = DIRICHLET; bc.pND = 0.; bc.pB = DIRICHLET; bc.pBD = 0.; bc.pT = DIRICHLET; bc.pTD = 0.; bc.uW = DIRICHLET; bc.uWD = 0.; bc.uE = DIRICHLET; bc.uED = 0.; bc.uS = DIRICHLET; bc.uSD = 0.; bc.uN = DIRICHLET; bc.uND = 0.; bc.uB = DIRICHLET; bc.uBD = 0.; bc.uT = DIRICHLET; bc.uTD = 0.; bc.vW = DIRICHLET; bc.vWD = 0.; bc.vE = DIRICHLET; bc.vED = 0.; bc.vS = DIRICHLET; bc.vSD = 0.; bc.vN = DIRICHLET; bc.vND = 0.; bc.vB = DIRICHLET; bc.vBD = 0.; bc.vT = DIRICHLET; bc.vTD = 0.; bc.wW = DIRICHLET; bc.wWD = 0.; bc.wE = DIRICHLET; bc.wED = 0.; bc.wS = DIRICHLET; bc.wSD = 0.; bc.wN = DIRICHLET; bc.wND = 0.; bc.wB = DIRICHLET; bc.wBD = 0.; bc.wT = DIRICHLET; bc.wTD 
= 0.; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_d_o[C] = p[C]; p_d_e[C] = p_d_o[C] - p_d_i[C]; if(fabs(p_d_e[C]) > p_d_err_max) p_d_err_max = fabs(p_d_e[C]); if(fabs(p_d_e[C]) < p_d_err_min) p_d_err_min = fabs(p_d_e[C]); p[C] = p_d_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_d_o[C] = u[C]; u_d_e[C] = u_d_o[C] - u_d_i[C]; if(fabs(u_d_e[C]) > u_d_err_max) u_d_err_max = fabs(u_d_e[C]); if(fabs(u_d_e[C]) < u_d_err_min) u_d_err_min = fabs(u_d_e[C]); u[C] = u_d_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_d_o[C] = v[C]; v_d_e[C] = v_d_o[C] - v_d_i[C]; if(fabs(v_d_e[C]) > v_d_err_max) v_d_err_max = fabs(v_d_e[C]); if(fabs(v_d_e[C]) < v_d_err_min) v_d_err_min = fabs(v_d_e[C]); v[C] = v_d_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_d_o[C] = w[C]; w_d_e[C] = w_d_o[C] - w_d_i[C]; if(fabs(w_d_e[C]) > w_d_err_max) w_d_err_max = fabs(w_d_e[C]); if(fabs(w_d_e[C]) < w_d_err_min) w_d_err_min = fabs(w_d_e[C]); w[C] = w_d_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_d_err_min, p_d_err_max); printf(" u %12.3e %12.3e\n", u_d_err_min, u_d_err_max); printf(" v %12.3e %12.3e\n", v_d_err_min, v_d_err_max); printf(" w %12.3e %12.3e\n", w_d_err_min, w_d_err_max); // Neumann field (on -1 <= x <= 1, -1 <= y <= 1, -1 <= z <= 1) printf("\n Neumann boundary conditions:\n"); printf(" p = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" u = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" v = cos(pi*x) + cos(pi*y) + cos(pi*z)\n"); printf(" w = cos(pi*x) + cos(pi*y) + cos(pi*z)\n\n"); // write input field for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_n_i[C] = cos(PI * (i-0.5)*Dom.dx); p_n_i[C] += cos(PI * (j-0.5)*Dom.dy); p_n_i[C] += cos(PI * (k-0.5)*Dom.dz); p[C] = p_n_i[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_n_i[C] = cos(PI * (i-1.0)*Dom.dx); u_n_i[C] += cos(PI * (j-0.5)*Dom.dy); u_n_i[C] += cos(PI * (k-0.5)*Dom.dz); u[C] = u_n_i[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_n_i[C] = cos(PI * (i-0.5)*Dom.dx); 
v_n_i[C] += cos(PI * (j-1.0)*Dom.dy); v_n_i[C] += cos(PI * (k-0.5)*Dom.dz); v[C] = v_n_i[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_n_i[C] = cos(PI * (i-0.5)*Dom.dx); w_n_i[C] += cos(PI * (j-0.5)*Dom.dy); w_n_i[C] += cos(PI * (k-1.0)*Dom.dz); w[C] = w_n_i[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // set BC to NEUMANN (overwrite input file for testing) printf(" Overwriting input file boundary conditions to ensure "); printf("NEUMANN..."); bc.pW = NEUMANN; bc.pE = NEUMANN; bc.pS = NEUMANN; bc.pN = NEUMANN; bc.pB = NEUMANN; bc.pT = NEUMANN; bc.uW = NEUMANN; bc.uE = NEUMANN; bc.uS = NEUMANN; bc.uN = NEUMANN; bc.uB = NEUMANN; bc.uT = NEUMANN; bc.vW = NEUMANN; bc.vE = NEUMANN; bc.vS = NEUMANN; bc.vN = NEUMANN; bc.vB = NEUMANN; bc.vT = NEUMANN; bc.wW = NEUMANN; bc.wE = NEUMANN; bc.wS = NEUMANN; bc.wN = NEUMANN; bc.wB = NEUMANN; bc.wT = NEUMANN; printf("done.\n"); // apply BC printf(" Running cuda_BC()..."); cuda_dom_BC(); printf("done.\n"); // pull to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_n_o[C] = p[C]; p_n_e[C] = p_n_o[C] - p_n_i[C]; if(fabs(p_n_e[C]) > p_n_err_max) p_n_err_max = fabs(p_n_e[C]); if(fabs(p_n_e[C]) < p_n_err_min) p_n_err_min = fabs(p_n_e[C]); p[C] = p_n_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_n_o[C] = u[C]; u_n_e[C] = u_n_o[C] - u_n_i[C]; if(fabs(u_n_e[C]) > u_n_err_max) u_n_err_max = fabs(u_n_e[C]); if(fabs(u_n_e[C]) < u_n_err_min) u_n_err_min = fabs(u_n_e[C]); u[C] = u_n_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_n_o[C] = v[C]; v_n_e[C] = v_n_o[C] - v_n_i[C]; if(fabs(v_n_e[C]) > v_n_err_max) v_n_err_max = fabs(v_n_e[C]); if(fabs(v_n_e[C]) < v_n_err_min) v_n_err_min = fabs(v_n_e[C]); v[C] = v_n_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_n_o[C] = w[C]; w_n_e[C] = w_n_o[C] - w_n_i[C]; if(fabs(w_n_e[C]) > w_n_err_max) w_n_err_max = fabs(w_n_e[C]); if(fabs(w_n_e[C]) < w_n_err_min) w_n_err_min = fabs(w_n_e[C]); w[C] = w_n_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_n_err_min, p_n_err_max); printf(" u %12.3e %12.3e\n", u_n_err_min, u_n_err_max); printf(" v %12.3e %12.3e\n", v_n_err_min, v_n_err_max); 
printf(" w %12.3e %12.3e\n", w_n_err_min, w_n_err_max); // clean up free(p_p_i); free(p_p_o); free(p_p_e); free(p_d_i); free(p_d_o); free(p_d_e); free(p_n_i); free(p_n_o); free(p_n_e); free(u_p_i); free(u_p_o); free(u_p_e); free(u_d_i); free(u_d_o); free(u_d_e); free(u_n_i); free(u_n_o); free(u_n_e); free(v_p_i); free(v_p_o); free(v_p_e); free(v_d_i); free(v_d_o); free(v_d_e); free(v_n_i); free(v_n_o); free(v_n_e); free(w_p_i); free(w_p_o); free(w_p_e); free(w_d_i); free(w_d_o); free(w_d_e); free(w_n_i); free(w_n_o); free(w_n_e); } void cuda_project_test(void) { int i, j, k; // iterators int C; // cell locations real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real); real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real); real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gyz.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real); real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real); // min and max error search real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nVelocity projection calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z), "); printf("p = exp(x) + exp(y) + exp(z)\n\n"); // set up expected solution for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_a[C] = nu; u_a[C] -= 1. / rho_f; u_a[C] -= 2 * exp((i-1.5)*Dom.dx); u_a[C] -= exp((j-1.0)*Dom.dy); u_a[C] -= exp((k-1.0)*Dom.dz); u_a[C] *= dt * exp((i-1.5)*Dom.dx); u_a[C] += exp((i-1.5)*Dom.dx); u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_a[C] = nu; v_a[C] -= 1. / rho_f; v_a[C] -= 2 * exp((j-1.5)*Dom.dy); v_a[C] -= exp((k-1.0)*Dom.dz); v_a[C] -= exp((i-1.0)*Dom.dx); v_a[C] *= dt * exp((j-1.5)*Dom.dy); v_a[C] += exp((j-1.5)*Dom.dy); v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_a[C] = nu; w_a[C] -= 1. 
/ rho_f; w_a[C] -= 2 * exp((k-1.5)*Dom.dz); w_a[C] -= exp((i-1.0)*Dom.dx); w_a[C] -= exp((j-1.0)*Dom.dy); w_a[C] *= dt * exp((k-1.5)*Dom.dz); w_a[C] += exp((k-1.5)*Dom.dz); w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // initialize input pressure and velocity fields for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p[C] = exp((i-1.0)*Dom.dx) + exp((j-1.0)*Dom.dy) + exp((k-1.0)*Dom.dz); } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u[C] = exp((i-1.5)*Dom.dx); } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v[C] = exp((j-1.5)*Dom.dy); } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w[C] = exp((k-1.5)*Dom.dz); } } } // write initial fields rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test #ifdef EXPLICIT printf(" Running cuda_U_star_2()..."); cuda_U_star_2(); cuda_project(); printf("done.\n"); #endif // pull fields back to host printf(" Pulling fields back to host..."); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // copy results and compute error for(k = Dom.Gfx.ks; k < Dom.Gfx.ke; k++) { for(j = Dom.Gfx.js; j < Dom.Gfx.je; j++) { for(i = Dom.Gfx.is; i < Dom.Gfx.ie; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; u_e[C] = (u_c[C] - u_a[C]) / u_a[C]; if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); u[C] = u_e[C]; } } } for(k = Dom.Gfy.ks; k < Dom.Gfy.ke; k++) { for(j = Dom.Gfy.js; j < Dom.Gfy.je; j++) { for(i = Dom.Gfy.is; i < Dom.Gfy.ie; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; v_e[C] = (v_c[C] - v_a[C]) / v_a[C]; if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); v[C] = v_e[C]; } } } for(k = Dom.Gfz.ks; k < Dom.Gfz.ke; k++) { for(j = Dom.Gfz.js; j < Dom.Gfz.je; j++) { for(i = Dom.Gfz.is; i < Dom.Gfz.ie; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; w_e[C] = (w_c[C] - w_a[C]) / w_a[C]; if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Velocity component: minimum error: maximum error:\n"); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(u_a); free(u_c); free(u_e); free(v_a); 
free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); } extern "C" void cuda_quad_interp_test(void) { int i, j, k; // iterators int C; real *p_a = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // cpumem += Dom.Gcc.s3b * sizeof(real); real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // cpumem += Dom.Gfx.s3b * sizeof(real); real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // cpumem += Dom.Gfy.s3b * sizeof(real); real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // cpumem += Dom.Gfz.s3b * sizeof(real); real x, y, z; // min and max error search real *p_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *p_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *u_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *u_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *v_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *v_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *w_err_min = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); real *w_err_max = (real*) malloc(nparts * sizeof(real)); // cpumem += nparts * sizeof(real); (cudaSetDevice(dev_start)); printf("\nLebedev quadrature interpolation validation:\n\n"); printf(" p = u = v = w = exp(x) + exp(y) + exp(z)\n\n"); // create analytic result and push to device for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.ksb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; p_a[C] = exp(x) + exp(y) + exp(z); p[C] = p_a[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.ksb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; u_a[C] = exp(x) + exp(y) + exp(z); u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.ksb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-1.0)*Dom.dy + Dom.ys; z = (k-0.5)*Dom.dz + Dom.zs; v_a[C] = exp(x) + exp(y) + exp(z); v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.ksb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs; y = (j-0.5)*Dom.dy + Dom.ys; z = (k-1.0)*Dom.dz + Dom.zs; w_a[C] = exp(x) + exp(y) + exp(z); w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK_ghost(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test printf(" Running cuda_quad_interp()..."); // set up quadrature nodes for 7th-order Lebedev quadrature /*real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; real alph2 = 2.186276035465284; // nodes real a1_t[6] = {PI12, PI12, PI12, PI12, 0., 0.}; real a1_p[6] = {0., PI12, PI, PI32, 0., PI}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34, 
PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; int nnodes = 26; // put all quadrature nodes together for interpolation real node_t[26]; real node_p[26]; for(i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for(i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for(i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } */ real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; //54.736 real alph2 = 2.186276035465284; //125.264 /*real alph3 = 0.440510663004698; //25.239 real alph4 = 2.701081990585095; //154.761 real alph5 = 1.264518957625227; //72.452 real alph6 = 1.877073695964566; //107.548 real alph7 = 1.249045772398254; //71.565 real alph8 = 1.892546881191539; //108.435 real alph9 = 0.321750554396642; //18.435 real alph10 = 2.819842099193151; //161.565 */ // nodes TODO: find a more elegant way of fixing the divide by sin(0) real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST}; real a1_p[6] = {0., PI12, PI, PI32, 0., 0.}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34, PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; /*real b_t[24] = {alph3, alph4, alph3, alph4, alph3, alph4, alph3, alph4, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6, alph5, alph5, alph6, alph6}; real b_p[24] = {PI14, PI74, PI14, PI74, PI34, PI54, PI34, PI54, alph7, -alph7, alph7, -alph7, alph8, -alph8, alph8, -alph8, alph9, alph10, alph9, alph10, -alph9, -alph10, -alph9, -alph10}; */ int nnodes = 26; // put all quadrature nodes together for interpolation real node_t[nnodes]; real node_p[nnodes]; for(i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for(i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for(i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } /*for(i = 0; i < 24; i++) { node_t[26+i] = b_t[i]; node_p[26+i] = b_p[i]; } */ // create a place to temporarily store field variables at quadrature nodes real *_node_t; real *_node_p; (cudaMalloc((void**) &_node_t, nnodes * sizeof(real))); gpumem += nnodes * sizeof(real); (cudaMalloc((void**) &_node_p, nnodes * sizeof(real))); gpumem += nnodes * sizeof(real); (cudaMemcpy(_node_t, node_t, nnodes * sizeof(real), cudaMemcpyHostToDevice)); (cudaMemcpy(_node_p, node_p, nnodes * sizeof(real), cudaMemcpyHostToDevice)); real *_pp; real *_ur; real *_ut; real *_up; real *pp = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *ur = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *ut = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); real *up = (real*) malloc(nnodes * nparts * sizeof(real)); // cpumem += nnodes * nparts * sizeof(real); (cudaMalloc((void**) &_pp, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); (cudaMalloc((void**) &_ur, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); (cudaMalloc((void**) &_ut, nnodes * nparts * 
sizeof(real))); gpumem += nnodes * nparts * sizeof(real); (cudaMalloc((void**) &_up, nnodes * nparts * sizeof(real))); gpumem += nnodes * nparts * sizeof(real); cuda_quad_interp(dev_start, _node_t, _node_p, nnodes, _pp, _ur, _ut, _up); printf("done.\n"); // pull fields back to host printf(" Pulling fields back to host..."); (cudaMemcpy(pp, _pp, nnodes * nparts * sizeof(real), cudaMemcpyDeviceToHost)); (cudaMemcpy(ur, _ur, nnodes * nparts * sizeof(real), cudaMemcpyDeviceToHost)); (cudaMemcpy(ut, _ut, nnodes * nparts * sizeof(real), cudaMemcpyDeviceToHost)); (cudaMemcpy(up, _up, nnodes * nparts * sizeof(real), cudaMemcpyDeviceToHost)); printf("done.\n"); for(i = 0; i < nnodes; i++) { printf("xx[%d] = %f yy[%d] = %f zz[%d] = %f\n", i, ur[i], i, ut[i], i, up[i]); } // write computed solution printf("\n Writing summarized solution to: out_%d.interp...", rec_paraview_stepnum_out); char path[FILE_NAME_SIZE] = ""; sprintf(path, "%s/output/out_%d.interp", ROOT_DIR, rec_paraview_stepnum_out); FILE *file = fopen(path, "w"); if(file == NULL) { fprintf(stderr, "Could not open file out_%d.interp", rec_paraview_stepnum_out); exit(EXIT_FAILURE); } for(int part = 0; part < nparts; part++) { p_err_min[part] = FLT_MAX; p_err_max[part] = FLT_MIN; u_err_min[part] = FLT_MAX; u_err_max[part] = FLT_MIN; v_err_min[part] = FLT_MAX; v_err_max[part] = FLT_MIN; w_err_min[part] = FLT_MAX; w_err_max[part] = FLT_MIN; fprintf(file, "parts[%d].rs = %f\n", part, parts[part].rs); fprintf(file, "%11s%11s%11s%11s%11s%11s%11s\n", "theta", "phi", "expected", "p_err", "u_err", "v_err", "w_err"); for(int n = 0; n < nnodes; n++) { real x_tmp = parts[part].rs*sin(node_t[n])*cos(node_p[n]) + parts[part].x; real y_tmp = parts[part].rs*sin(node_t[n])*sin(node_p[n]) + parts[part].y; real z_tmp = parts[part].rs*cos(node_t[n]) + parts[part].z; real pa_tmp = exp(x_tmp) + exp(y_tmp) + exp(z_tmp); real u_tmp = ur[n+part*nnodes]*sin(node_t[n])*cos(node_p[n]); u_tmp += ut[n+part*nnodes]*cos(node_t[n])*cos(node_p[n]); u_tmp -= up[n+part*nnodes]*sin(node_p[n]); real v_tmp = ur[n+part*nnodes]*sin(node_t[n])*sin(node_p[n]); v_tmp += ut[n+part*nnodes]*cos(node_t[n])*sin(node_p[n]); v_tmp += up[n+part*nnodes]*cos(node_p[n]); real w_tmp = ur[n+part*nnodes]*cos(node_t[n]); w_tmp -= ut[n+part*nnodes]*sin(node_t[n]); real p_out = (pa_tmp-pp[n+part*nnodes]) / pa_tmp; real u_out = (pa_tmp-u_tmp) / pa_tmp; real v_out = (pa_tmp-v_tmp) / pa_tmp; real w_out = (pa_tmp-w_tmp) / pa_tmp; if(fabs(p_out) < p_err_min[part]) p_err_min[part] = fabs(p_out); if(fabs(p_out) > p_err_max[part]) p_err_max[part] = fabs(p_out); if(fabs(u_out) < u_err_min[part]) u_err_min[part] = fabs(u_out); if(fabs(u_out) > u_err_max[part]) u_err_max[part] = fabs(u_out); if(fabs(v_out) < v_err_min[part]) v_err_min[part] = fabs(v_out); if(fabs(v_out) > v_err_max[part]) v_err_max[part] = fabs(v_out); if(fabs(w_out) < w_err_min[part]) w_err_min[part] = fabs(w_out); if(fabs(w_out) > w_err_max[part]) w_err_max[part] = fabs(w_out); fprintf(file, "%11.7f%11.7f%11.7f%11.3e%11.3e%11.3e%11.3e\n", node_t[n], node_p[n], pa_tmp, p_out, u_out, v_out, w_out); } } fclose(file); printf("done.\n"); printf("\n Error summary:\n"); for(int a = 0; a < nparts; a++) { printf(" Particle %d\n", a); printf(" Field component: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_err_min[a], p_err_max[a]); printf(" u %12.3e %12.3e\n", u_err_min[a], u_err_max[a]); printf(" v %12.3e %12.3e\n", v_err_min[a], v_err_max[a]); printf(" w %12.3e %12.3e\n\n", w_err_min[a], w_err_max[a]); } free(p_a); free(u_a); 
free(v_a); free(w_a); free(p_err_min); free(p_err_max); free(u_err_min); free(u_err_max); free(v_err_min); free(v_err_max); free(w_err_min); free(w_err_max); free(pp); free(ur); free(ut); free(up); cudaFree(_node_t); cudaFree(_node_p); cudaFree(_pp); cudaFree(_ur); cudaFree(_ut); cudaFree(_up); } extern "C" void cuda_lamb_test(void) { int i, j, k; // iterators int C; // cell locations real x, y, z; real r, theta, phi; real a = parts[0].r; real *p_a = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gcc.s3b * sizeof(real); real *p_c = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gcc.s3b * sizeof(real) real *p_e = (real*) malloc(Dom.Gcc.s3b * sizeof(real)); // error difference // cpumem += Dom.Gcc.s3b * sizeof(real) real *u_a = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfx.s3b * sizeof(real) real *u_c = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfx.s3b * sizeof(real) real *u_e = (real*) malloc(Dom.Gfx.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfx.s3b * sizeof(real) real *v_a = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfy.s3b * sizeof(real) real *v_c = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfy.s3b * sizeof(real) real *v_e = (real*) malloc(Dom.Gfy.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfy.s3b * sizeof(real) real *w_a = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // expected solution // cpumem += Dom.Gfz.s3b * sizeof(real) real *w_c = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // computed solution // cpumem += Dom.Gfz.s3b * sizeof(real) real *w_e = (real*) malloc(Dom.Gfz.s3b * sizeof(real)); // error difference // cpumem += Dom.Gfz.s3b * sizeof(real) // min and max error search real p_err_min = FLT_MAX; real p_err_max = FLT_MIN; real u_err_min = FLT_MAX; real u_err_max = FLT_MIN; real v_err_min = FLT_MAX; real v_err_max = FLT_MIN; real w_err_min = FLT_MAX; real w_err_max = FLT_MIN; printf("\nLamb's coefficient calculation validation:\n\n"); printf(" u = exp(x), v = exp(y), w = exp(z), "); printf("p = exp(x) + exp(y) + exp(z)\n\n"); real U = 0.; real V = 0.; real W = 0.01; // set up expected solution for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); p_a[C] = -1.5*(U*st*cp + V*st*sp + W*ct)*a/r/r; p[C] = p_a[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) 
phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); u_a[C] = -0.75*a/r*(U + (U*st*cp + V*st*sp + W*ct)*st*cp) - 0.25*a*a*a/r/r/r*(U - 3.*(U*st*cp + V*st*sp + W*ct)*st*cp) + U; u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-1.0)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); v_a[C] = -0.75*a/r*(V + (U*st*cp + V*st*sp + W*ct)*st*sp) - 0.25*a*a*a/r/r/r*(V - 3.*(U*st*cp + V*st*sp + W*ct)*st*sp) + V; v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-1.0)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); w_a[C] = -0.75*a/r*(W + (U*st*cp + V*st*sp + W*ct)*ct) - 0.25*a*a*a/r/r/r*(W - 3.*(U*st*cp + V*st*sp + W*ct)*ct) + W; w[C] = w_a[C]; } } } // write expected solution rec_paraview_stepnum_out++; printf(" Writing expected solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // set up expected solution for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); p_a[C] = -1.5*(U*st*cp + V*st*sp + W*ct)*a/r/r; p[C] = p_a[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; x = (i-1.0)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); u_a[C] = -0.75*a/r*(U + (U*st*cp + V*st*sp + W*ct)*st*cp) - 0.25*a*a*a/r/r/r*(U - 3.*(U*st*cp + V*st*sp + W*ct)*st*cp) + U; u[C] = u_a[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-1.0)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-0.5)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) 
phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); v_a[C] = -0.75*a/r*(V + (U*st*cp + V*st*sp + W*ct)*st*sp) - 0.25*a*a*a/r/r/r*(V - 3.*(U*st*cp + V*st*sp + W*ct)*st*sp) + V; v[C] = v_a[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; x = (i-0.5)*Dom.dx + Dom.xs;// - parts[0].x; y = (j-0.5)*Dom.dy + Dom.ys;// - parts[0].y; z = (k-1.0)*Dom.dz + Dom.zs;// - parts[0].z; r = sqrt(x*x+y*y+z*z); theta = acos(z/r); phi = acos(x/sqrt(x*x+y*y)); if(y<0.) phi = 2.*PI-phi; real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); w_a[C] = -0.75*a/r*(W + (U*st*cp + V*st*sp + W*ct)*ct) - 0.25*a*a*a/r/r/r*(W - 3.*(U*st*cp + V*st*sp + W*ct)*ct) + W; w[C] = w_a[C]; } } } // write initial fields (same as expected solution) rec_paraview_stepnum_out++; printf(" Writing initial fields to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // push fields to device printf("\n Pushing fields to devices..."); cuda_dom_push(); printf("done.\n"); // call code to test printf(" Running cuda_part_BC()..."); cuda_Lamb(); cuda_part_BC(); cuda_part_pull(); char nam[FILE_NAME_SIZE] = "lamb.rec"; recorder_lamb(nam,0); printf("done.\n"); // pull fields back to host printf(" Pulling fields back to host..."); //cuda_div_U(); cuda_dom_pull(); printf("done.\n"); // write computed solution rec_paraview_stepnum_out++; printf("\n Writing computed solution to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); // copy results and compute error for(k = Dom.Gcc.ksb; k < Dom.Gcc.keb; k++) { for(j = Dom.Gcc.jsb; j < Dom.Gcc.jeb; j++) { for(i = Dom.Gcc.isb; i < Dom.Gcc.ieb; i++) { C = i + j*Dom.Gcc.s1b + k*Dom.Gcc.s2b; p_c[C] = p[C]; if(p_c[C] != 0) p_e[C] = (p_c[C] - p_a[C]) / p_c[C]; else p_e[C] = (p_c[C] - p_a[C]); if(fabs(p_e[C]) > p_err_max) p_err_max = fabs(p_e[C]); if(fabs(p_e[C]) < p_err_min) p_err_min = fabs(p_e[C]); p[C] = p_e[C]; } } } for(k = Dom.Gfx.ksb; k < Dom.Gfx.keb; k++) { for(j = Dom.Gfx.jsb; j < Dom.Gfx.jeb; j++) { for(i = Dom.Gfx.isb; i < Dom.Gfx.ieb; i++) { C = i + j*Dom.Gfx.s1b + k*Dom.Gfx.s2b; u_c[C] = u[C]; /*if(u_c[C] != 0) u_e[C] = (u_c[C] - u_a[C]) / u_c[C]; else */ u_e[C] = (u_c[C] - u_a[C]); /*if(fabs(u_e[C]) > u_err_max) u_err_max = fabs(u_e[C]); if(fabs(u_e[C]) < u_err_min) u_err_min = fabs(u_e[C]); */ u[C] = u_e[C]; } } } for(k = Dom.Gfy.ksb; k < Dom.Gfy.keb; k++) { for(j = Dom.Gfy.jsb; j < Dom.Gfy.jeb; j++) { for(i = Dom.Gfy.isb; i < Dom.Gfy.ieb; i++) { C = i + j*Dom.Gfy.s1b + k*Dom.Gfy.s2b; v_c[C] = v[C]; /*if(v_c[C] != 0) v_e[C] = (v_c[C] - v_a[C]) / v_c[C]; else */ v_e[C] = (v_c[C] - v_a[C]); /*if(fabs(v_e[C]) > v_err_max) v_err_max = fabs(v_e[C]); if(fabs(v_e[C]) < v_err_min) v_err_min = fabs(v_e[C]); */ v[C] = v_e[C]; } } } for(k = Dom.Gfz.ksb; k < Dom.Gfz.keb; k++) { for(j = Dom.Gfz.jsb; j < Dom.Gfz.jeb; j++) { for(i = Dom.Gfz.isb; i < Dom.Gfz.ieb; i++) { C = i + j*Dom.Gfz.s1b + k*Dom.Gfz.s2b; w_c[C] = w[C]; /*if(w_c[C] != 0) w_e[C] = (w_c[C] - w_a[C]) / w_c[C]; else */ w_e[C] = (w_c[C] - w_a[C]); /*if(fabs(w_e[C]) > w_err_max) w_err_max = fabs(w_e[C]); if(fabs(w_e[C]) < w_err_min) w_err_min = fabs(w_e[C]); */ w[C] = w_e[C]; } } } // write error difference rec_paraview_stepnum_out++; printf(" Writing error difference to: out_%d.pvtr...", rec_paraview_stepnum_out); out_VTK(); printf("done.\n"); printf("\n Error summary:\n"); printf(" Field 
variable: minimum error: maximum error:\n"); printf(" p %12.3e %12.3e\n", p_err_min, p_err_max); printf(" u %12.3e %12.3e\n", u_err_min, u_err_max); printf(" v %12.3e %12.3e\n", v_err_min, v_err_max); printf(" w %12.3e %12.3e\n\n", w_err_min, w_err_max); // clean up free(p_a); free(p_c); free(p_e); free(u_a); free(u_c); free(u_e); free(v_a); free(v_c); free(v_e); free(w_a); free(w_c); free(w_e); }
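The test program above fills p_a, u_a, v_a, w_a with the Lamb/Stokes analytic solution for a uniform stream (U, V, W) past a sphere of radius a, runs cuda_part_BC(), and then writes the error fields and the min/max error summary. The per-point expressions repeated inside every grid loop can be collected into two host helpers; the sketch below restates those same formulas (the helper names stokes_velocity and stokes_pressure are illustrative and not part of the original file):

#include <math.h>

typedef double real;                    // bluebottle-style alias, local to this sketch
static const real PI = 3.14159265358979324;

// Velocity of creeping (Stokes) flow (U, V, W) past a sphere of radius a centered at
// the origin, evaluated at the point (x, y, z); mirrors the grid-loop expressions above.
static void stokes_velocity(real x, real y, real z, real a,
                            real U, real V, real W,
                            real *u, real *v, real *w)
{
  real r = sqrt(x*x + y*y + z*z);
  real theta = acos(z / r);
  real phi = acos(x / sqrt(x*x + y*y));
  if (y < 0.) phi = 2.*PI - phi;
  real st = sin(theta), ct = cos(theta);
  real sp = sin(phi),   cp = cos(phi);
  real ur = U*st*cp + V*st*sp + W*ct;   // free-stream speed projected on r-hat
  *u = -0.75*a/r*(U + ur*st*cp) - 0.25*a*a*a/(r*r*r)*(U - 3.*ur*st*cp) + U;
  *v = -0.75*a/r*(V + ur*st*sp) - 0.25*a*a*a/(r*r*r)*(V - 3.*ur*st*sp) + V;
  *w = -0.75*a/r*(W + ur*ct)    - 0.25*a*a*a/(r*r*r)*(W - 3.*ur*ct)    + W;
}

// Corresponding pressure field, matching the p_a expression above.
static real stokes_pressure(real x, real y, real z, real a,
                            real U, real V, real W)
{
  real r = sqrt(x*x + y*y + z*z);
  real theta = acos(z / r);
  real phi = acos(x / sqrt(x*x + y*y));
  if (y < 0.) phi = 2.*PI - phi;
  real ur = U*sin(theta)*cos(phi) + V*sin(theta)*sin(phi) + W*cos(theta);
  return -1.5 * ur * a / (r*r);
}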
a84e9bc06e57914bd0404e9eef724bd4ecffc0d4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void matrixMulCUDA3(float *C, float *A, float *B, int n)
{
    int start_row = blockDim.y * blockIdx.y * TILE_WIDTH + threadIdx.y * TILE_WIDTH;
    int end_row = start_row + TILE_WIDTH;
    int start_col = blockDim.x * blockIdx.x * TILE_WIDTH + threadIdx.x * TILE_WIDTH;
    int end_col = start_col + TILE_WIDTH;

    for (int row = start_row; row < end_row; row++) {
        for (int col = start_col; col < end_col; col++) {
            float C_val = 0;
            for (int k = 0; k < n; ++k) {
                float A_elem = A[row * n + k];
                float B_elem = B[k * n + col];
                C_val += A_elem * B_elem;
            }
            C[row*n + col] = C_val;
        }
    }
}
a84e9bc06e57914bd0404e9eef724bd4ecffc0d4.cu
#include "includes.h"

__global__ void matrixMulCUDA3(float *C, float *A, float *B, int n)
{
    int start_row = blockDim.y * blockIdx.y * TILE_WIDTH + threadIdx.y * TILE_WIDTH;
    int end_row = start_row + TILE_WIDTH;
    int start_col = blockDim.x * blockIdx.x * TILE_WIDTH + threadIdx.x * TILE_WIDTH;
    int end_col = start_col + TILE_WIDTH;

    for (int row = start_row; row < end_row; row++) {
        for (int col = start_col; col < end_col; col++) {
            float C_val = 0;
            for (int k = 0; k < n; ++k) {
                float A_elem = A[row * n + k];
                float B_elem = B[k * n + col];
                C_val += A_elem * B_elem;
            }
            C[row*n + col] = C_val;
        }
    }
}
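This pair is a per-thread tiled matrix multiply: each thread owns a TILE_WIDTH x TILE_WIDTH sub-block of C and runs the full dot product over k for every element of that sub-block. A self-contained host driver is sketched below; the TILE_WIDTH value and the launch shape are assumptions (includes.h is not shown), and the kernel body is restated under an illustrative name so the sketch compiles on its own:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TILE_WIDTH 4   // assumption: the real value lives in includes.h

// Restatement of the per-thread tiled kernel above, under an illustrative name.
__global__ void matrixMulTiledPerThread(float *C, float *A, float *B, int n) {
    int start_row = (blockDim.y * blockIdx.y + threadIdx.y) * TILE_WIDTH;
    int start_col = (blockDim.x * blockIdx.x + threadIdx.x) * TILE_WIDTH;
    for (int row = start_row; row < start_row + TILE_WIDTH; row++) {
        for (int col = start_col; col < start_col + TILE_WIDTH; col++) {
            float acc = 0.0f;
            for (int k = 0; k < n; ++k)
                acc += A[row * n + k] * B[k * n + col];
            C[row * n + col] = acc;
        }
    }
}

int main() {
    const int threads = 8, blocks = 2;
    const int n = threads * blocks * TILE_WIDTH;   // 64: every row/column covered exactly once
    const size_t bytes = (size_t)n * n * sizeof(float);
    float *A = (float *)malloc(bytes), *B = (float *)malloc(bytes), *C = (float *)malloc(bytes);
    for (int i = 0; i < n * n; i++) { A[i] = 1.0f; B[i] = 2.0f; }
    float *dA, *dB, *dC;
    cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes);
    cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice);
    dim3 block(threads, threads);
    dim3 grid(blocks, blocks);
    matrixMulTiledPerThread<<<grid, block>>>(dC, dA, dB, n);
    cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost);
    printf("C[0] = %g (expected %g)\n", C[0], 2.0f * n);   // 1 * 2 summed n times
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    free(A); free(B); free(C);
    return 0;
}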
hw4-2_pitch_64.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define SIZEOFINT sizeof(int) #define BLOCK_DIM 64 #define TH_DIM 32 const int INF = ((1 << 30) - 1); int n, m, padding_n, pitch_k, Dist_row_size_in_byte; size_t pitch; int up_part_size_in_block = 0, bottom_part_size_in_block = 0, up_part_height = 0, bottom_part_height = 0; int *Dist, *Dist_s; int *Dist_cuda, *Dist_cuda0, *Dist_cuda1; void show_mat(int *start_p, int vertex_num){ for(int i = 0; i < vertex_num; i++){ for(int j = 0; j < vertex_num; j++){ if(start_p[i * vertex_num + j] == INF){ printf("INF\t "); }else{ printf("%d\t ", start_p[i * vertex_num + j]); } } printf("\n"); } } void show_mat_cuda(int *start_p, int vertex_num, int padding_n, size_t pitch, int device_id){ int *temp = (int*)malloc(SIZEOFINT * padding_n * padding_n); hipSetDevice(device_id); // hipMemcpy(temp, start_p, (SIZEOFINT * vertex_num * vertex_num), hipMemcpyDeviceToHost); hipMemcpy2D(temp, SIZEOFINT * padding_n, start_p, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost); printf("---\n"); for(int i = 0; i < vertex_num; i++){ for(int j = 0; j < vertex_num; j++){ if(temp[i * vertex_num + j] == INF){ printf("INF\t "); }else{ printf("%d\t ", temp[i * vertex_num + j]); } } printf("\n"); } printf("---\n"); } void malloc_Dist(){ hipHostMalloc(&Dist, SIZEOFINT * padding_n * padding_n, hipHostMallocPortable); // Dist = (int*)malloc(SIZEOFINT * padding_n * padding_n); Dist_s = (int*)malloc(SIZEOFINT * n * n); } int getDist(int i, int j){return Dist[i * padding_n + j];} int *getDistAddr(int i, int j){return &(Dist[i * padding_n + j]);} void setDist(int i, int j, int val){Dist[i * padding_n + j] = val;} void setup_DistCuda(){ // hipMalloc((void **)&Dist_cuda, SIZEOFINT * padding_n * padding_n); // hipMemcpy(Dist_cuda, Dist, (padding_n * padding_n * SIZEOFINT), hipMemcpyHostToDevice); // hipMallocPitch(&Dist_cuda, &pitch, SIZEOFINT * padding_n, padding_n); // hipMemcpy2D(Dist_cuda, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice); // pitch_k = ((int)pitch) / SIZEOFINT; hipStream_t stream; hipStreamCreate(&stream); hipSetDevice(0); hipDeviceEnablePeerAccess(0, 0); hipMallocPitch(&Dist_cuda0, &pitch, SIZEOFINT * padding_n, padding_n); hipMemcpy2DAsync(Dist_cuda0, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice, stream); pitch_k = ((int)pitch) / SIZEOFINT; hipSetDevice(1); hipDeviceEnablePeerAccess(1, 0); hipMallocPitch(&Dist_cuda1, &pitch, SIZEOFINT * padding_n, padding_n); hipMemcpy2D(Dist_cuda1, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice); hipStreamDestroy(stream); } void back_DistCuda(){ // hipMemcpy(Dist, Dist_cuda, (padding_n * padding_n * SIZEOFINT), hipMemcpyDeviceToHost); // hipMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost); // hipStream_t stream; // hipStreamCreate(&stream); hipSetDevice(0); hipMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda0, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost); hipSetDevice(1); hipMemcpy2D(&(Dist[up_part_height * padding_n]), SIZEOFINT * padding_n, &(Dist_cuda1[up_part_height * pitch_k]), pitch, SIZEOFINT * padding_n, (bottom_part_height), hipMemcpyDeviceToHost); // hipStreamDestroy(stream); } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); fread(&m, sizeof(int), 
1, file); padding_n = ((n + BLOCK_DIM - 1) / BLOCK_DIM) * BLOCK_DIM; Dist_row_size_in_byte = SIZEOFINT * padding_n; malloc_Dist(); for (int i = 0; i < padding_n; i++) { for (int j = 0; j < padding_n; j++) { if (i == j) { setDist(i, j, 0); // Dist[i][j] = 0; } else { setDist(i, j, INF); // Dist[i][j] = INF; } } } int *edges_buf = (int*)malloc(3 * m * SIZEOFINT); fread(edges_buf, sizeof(int), 3 * m, file); for (int i = 0; i < m; i++) { // fread(pair, sizeof(int), 3, file); setDist(edges_buf[3 * i], edges_buf[3 * i + 1], edges_buf[3 * i + 2]); } free(edges_buf); fclose(file); } void output(char* outFileName) { FILE* outfile = fopen(outFileName, "w"); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { // if (Dist[i][j] >= INF) Dist[i][j] = INF; if (getDist(i, j) >= INF) setDist(i, j, INF); Dist_s[i * n + j] = getDist(i, j); } // fwrite(Dist[i], sizeof(int), n, outfile); // fwrite(getDistAddr(i, 0), SIZEOFINT, n, outfile); } fwrite(Dist_s, sizeof(int), n * n, outfile); fclose(outfile); } __forceinline__ __device__ void block_calc(int* C, int* A, int* B, int bj, int bi) { for (int k = 0; k < BLOCK_DIM; k++) { int sum0 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj]; int sum1 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj]; int sum2 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)]; int sum3 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)]; C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0); C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1); C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2); C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3); __syncthreads(); } } __forceinline__ __device__ void block_calc_rev_async(int* C, int* A, int* B, int bj, int bi) { #pragma unroll 10 for (int k = 0; k < BLOCK_DIM; k++) { int sum0 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + bj]; int sum1 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + bj]; int sum2 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + (bj + TH_DIM)]; int sum3 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + (bj + TH_DIM)]; C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0); C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1); C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2); C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3); } } __global__ void floyd_warshall_block_kernel_phase1(int n, int k, int* graph) { const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; // Transfer to temp shared arrays C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, C, bi, bj); // Transfer back to graph graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void 
floyd_warshall_block_kernel_phase2(int n, int k, int* graph) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 1/2 const unsigned int i = blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, B, bi, bj); graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; // Phase 2 2/2 C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, B, C, bi, bj); // Block C is the only one that could be changed graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase3(int n, int k, int* graph, int start_x, int start_y) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional const unsigned int j = start_x + blockIdx.x; const unsigned int i = start_y + blockIdx.y; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))]; A[bj*BLOCK_DIM + bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; A[bj*BLOCK_DIM + (bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; A[(bj + TH_DIM)*BLOCK_DIM + bi] = 
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; A[(bj + TH_DIM)*BLOCK_DIM + (bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc_rev_async(C, A, B, bi, bj); __syncthreads(); graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase21(int n, int k, int* graph, int start) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 1/2, update column // const unsigned int i = blockIdx.x; const unsigned int i = start + blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, B, bi, bj); graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase22(int n, int k, int* graph, int start) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 2/2, update row // const unsigned int i = blockIdx.x; const unsigned int i = start + blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + 
(bj + TH_DIM))]; C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, B, C, bi, bj); // Block C is the only one that could be changed graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } void block_FW_cuda() { // int round = padding_n / B; const int blocks = padding_n / BLOCK_DIM; dim3 block_dim(TH_DIM, TH_DIM, 1); dim3 phase3_grid(blocks, blocks, 1); // for (int k = 0; k < blocks; k++) { // hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda); // floyd_warshall_block_kernel_phase3<<<phase3_grid, block_dim>>>(pitch_k, k, Dist_cuda, 0, 0); // } const int row_size_pitchk = BLOCK_DIM * pitch_k; up_part_size_in_block = (blocks+1)/2; bottom_part_size_in_block = blocks/2; up_part_height = BLOCK_DIM * up_part_size_in_block; bottom_part_height = BLOCK_DIM * bottom_part_size_in_block; dim3 phase31_grid(blocks, up_part_size_in_block, 1); dim3 phase32_grid(blocks, bottom_part_size_in_block, 1); printf("Up Blocks: %d, Bottom Blocks: %d\n", up_part_size_in_block, bottom_part_size_in_block); for (int k = 0; k < blocks; k++) { int next_k = k + 1; // Phase 1 hipSetDevice(0); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0); hipSetDevice(1); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1); // Phase 2 hipStream_t stream; hipStreamCreate(&stream); hipSetDevice(0); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda0); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase21), dim3(up_part_size_in_block), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0, 0); hipSetDevice(1); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda1); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase21), dim3(bottom_part_size_in_block), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1, up_part_size_in_block); // Calculate rows of phase 2 if(k < up_part_size_in_block){ hipSetDevice(0); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase22), dim3(blocks), dim3(block_dim), 0, stream, pitch_k, k, Dist_cuda0, 0); }else{ hipSetDevice(1); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase22), dim3(blocks), dim3(block_dim), 0, stream, pitch_k, k, Dist_cuda1, 0); } hipStreamDestroy(stream); // Phase 3 hipSetDevice(0); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase3), dim3(phase31_grid), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0, 0, 0); hipSetDevice(1); hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase3), dim3(phase32_grid), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1, 0, up_part_size_in_block); // Transfer data to another GPU if(next_k < up_part_size_in_block){ // printf("Up K: %d, Next_K: %d, Blocks: %d\n", k, 
next_k, blocks); hipMemcpyPeer(&(Dist_cuda1[next_k * row_size_pitchk]), 1, &(Dist_cuda0[next_k * row_size_pitchk]), 0, SIZEOFINT * row_size_pitchk); }else if(next_k < blocks){ // printf("Down K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks); hipMemcpyPeer(&(Dist_cuda0[next_k * row_size_pitchk]), 0, &(Dist_cuda1[next_k * row_size_pitchk]), 1, SIZEOFINT * row_size_pitchk); } } } int main(int argc, char* argv[]) { input(argv[1]); // show_mat(getDistAddr(0, 0), n); setup_DistCuda(); // printf("Vertice: %d, Edge: %d, B: %d, Padding: %d\n", n, m, BLOCK_DIM, padding_n); block_FW_cuda(); back_DistCuda(); // show_mat(getDistAddr(0, 0), n); output(argv[2]); // show_mat(getDistAddr(0, 0), n); return 0; }
hw4-2_pitch_64.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda.h> #define SIZEOFINT sizeof(int) #define BLOCK_DIM 64 #define TH_DIM 32 const int INF = ((1 << 30) - 1); int n, m, padding_n, pitch_k, Dist_row_size_in_byte; size_t pitch; int up_part_size_in_block = 0, bottom_part_size_in_block = 0, up_part_height = 0, bottom_part_height = 0; int *Dist, *Dist_s; int *Dist_cuda, *Dist_cuda0, *Dist_cuda1; void show_mat(int *start_p, int vertex_num){ for(int i = 0; i < vertex_num; i++){ for(int j = 0; j < vertex_num; j++){ if(start_p[i * vertex_num + j] == INF){ printf("INF\t "); }else{ printf("%d\t ", start_p[i * vertex_num + j]); } } printf("\n"); } } void show_mat_cuda(int *start_p, int vertex_num, int padding_n, size_t pitch, int device_id){ int *temp = (int*)malloc(SIZEOFINT * padding_n * padding_n); cudaSetDevice(device_id); // cudaMemcpy(temp, start_p, (SIZEOFINT * vertex_num * vertex_num), cudaMemcpyDeviceToHost); cudaMemcpy2D(temp, SIZEOFINT * padding_n, start_p, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost); printf("---\n"); for(int i = 0; i < vertex_num; i++){ for(int j = 0; j < vertex_num; j++){ if(temp[i * vertex_num + j] == INF){ printf("INF\t "); }else{ printf("%d\t ", temp[i * vertex_num + j]); } } printf("\n"); } printf("---\n"); } void malloc_Dist(){ cudaHostAlloc(&Dist, SIZEOFINT * padding_n * padding_n, cudaHostAllocPortable); // Dist = (int*)malloc(SIZEOFINT * padding_n * padding_n); Dist_s = (int*)malloc(SIZEOFINT * n * n); } int getDist(int i, int j){return Dist[i * padding_n + j];} int *getDistAddr(int i, int j){return &(Dist[i * padding_n + j]);} void setDist(int i, int j, int val){Dist[i * padding_n + j] = val;} void setup_DistCuda(){ // cudaMalloc((void **)&Dist_cuda, SIZEOFINT * padding_n * padding_n); // cudaMemcpy(Dist_cuda, Dist, (padding_n * padding_n * SIZEOFINT), cudaMemcpyHostToDevice); // cudaMallocPitch(&Dist_cuda, &pitch, SIZEOFINT * padding_n, padding_n); // cudaMemcpy2D(Dist_cuda, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice); // pitch_k = ((int)pitch) / SIZEOFINT; cudaStream_t stream; cudaStreamCreate(&stream); cudaSetDevice(0); cudaDeviceEnablePeerAccess(0, 0); cudaMallocPitch(&Dist_cuda0, &pitch, SIZEOFINT * padding_n, padding_n); cudaMemcpy2DAsync(Dist_cuda0, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice, stream); pitch_k = ((int)pitch) / SIZEOFINT; cudaSetDevice(1); cudaDeviceEnablePeerAccess(1, 0); cudaMallocPitch(&Dist_cuda1, &pitch, SIZEOFINT * padding_n, padding_n); cudaMemcpy2D(Dist_cuda1, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice); cudaStreamDestroy(stream); } void back_DistCuda(){ // cudaMemcpy(Dist, Dist_cuda, (padding_n * padding_n * SIZEOFINT), cudaMemcpyDeviceToHost); // cudaMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost); // cudaStream_t stream; // cudaStreamCreate(&stream); cudaSetDevice(0); cudaMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda0, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost); cudaSetDevice(1); cudaMemcpy2D(&(Dist[up_part_height * padding_n]), SIZEOFINT * padding_n, &(Dist_cuda1[up_part_height * pitch_k]), pitch, SIZEOFINT * padding_n, (bottom_part_height), cudaMemcpyDeviceToHost); // cudaStreamDestroy(stream); } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); fread(&m, sizeof(int), 1, file); padding_n = ((n + 
BLOCK_DIM - 1) / BLOCK_DIM) * BLOCK_DIM; Dist_row_size_in_byte = SIZEOFINT * padding_n; malloc_Dist(); for (int i = 0; i < padding_n; i++) { for (int j = 0; j < padding_n; j++) { if (i == j) { setDist(i, j, 0); // Dist[i][j] = 0; } else { setDist(i, j, INF); // Dist[i][j] = INF; } } } int *edges_buf = (int*)malloc(3 * m * SIZEOFINT); fread(edges_buf, sizeof(int), 3 * m, file); for (int i = 0; i < m; i++) { // fread(pair, sizeof(int), 3, file); setDist(edges_buf[3 * i], edges_buf[3 * i + 1], edges_buf[3 * i + 2]); } free(edges_buf); fclose(file); } void output(char* outFileName) { FILE* outfile = fopen(outFileName, "w"); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { // if (Dist[i][j] >= INF) Dist[i][j] = INF; if (getDist(i, j) >= INF) setDist(i, j, INF); Dist_s[i * n + j] = getDist(i, j); } // fwrite(Dist[i], sizeof(int), n, outfile); // fwrite(getDistAddr(i, 0), SIZEOFINT, n, outfile); } fwrite(Dist_s, sizeof(int), n * n, outfile); fclose(outfile); } __forceinline__ __device__ void block_calc(int* C, int* A, int* B, int bj, int bi) { for (int k = 0; k < BLOCK_DIM; k++) { int sum0 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj]; int sum1 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj]; int sum2 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)]; int sum3 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)]; C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0); C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1); C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2); C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3); __syncthreads(); } } __forceinline__ __device__ void block_calc_rev_async(int* C, int* A, int* B, int bj, int bi) { #pragma unroll 10 for (int k = 0; k < BLOCK_DIM; k++) { int sum0 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + bj]; int sum1 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + bj]; int sum2 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + (bj + TH_DIM)]; int sum3 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + (bj + TH_DIM)]; C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0); C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1); C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2); C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3); } } __global__ void floyd_warshall_block_kernel_phase1(int n, int k, int* graph) { const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; // Transfer to temp shared arrays C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, C, bi, bj); // Transfer back to graph graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase2(int n, int k, int* 
graph) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 1/2 const unsigned int i = blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, B, bi, bj); graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; // Phase 2 2/2 C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, B, C, bi, bj); // Block C is the only one that could be changed graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase3(int n, int k, int* graph, int start_x, int start_y) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional const unsigned int j = start_x + blockIdx.x; const unsigned int i = start_y + blockIdx.y; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))]; A[bj*BLOCK_DIM + bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; A[bj*BLOCK_DIM + (bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; A[(bj + TH_DIM)*BLOCK_DIM + bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; A[(bj + 
TH_DIM)*BLOCK_DIM + (bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc_rev_async(C, A, B, bi, bj); __syncthreads(); graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase21(int n, int k, int* graph, int start) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 1/2, update column // const unsigned int i = blockIdx.x; const unsigned int i = start + blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; C[bi*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, C, B, bi, bj); graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } __global__ void floyd_warshall_block_kernel_phase22(int n, int k, int* graph, int start) { // BlockDim is one dimensional (Straight along diagonal) // Blocks themselves are two dimensional // Phase 2 2/2, update row // const unsigned int i = blockIdx.x; const unsigned int i = start + blockIdx.x; const unsigned int bi = threadIdx.y; const unsigned int bj = threadIdx.x; __shared__ int A[BLOCK_DIM * BLOCK_DIM]; __shared__ int B[BLOCK_DIM * BLOCK_DIM]; __shared__ int C[BLOCK_DIM * BLOCK_DIM]; B[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)]; B[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)]; B[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))]; B[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))]; C[bi*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + bi)*n + 
(i*BLOCK_DIM + bj)]; C[(bi + TH_DIM)*BLOCK_DIM + bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)]; C[bi*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))]; C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))]; __syncthreads(); block_calc(C, B, C, bi, bj); // Block C is the only one that could be changed graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)*BLOCK_DIM + bj]; graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi*BLOCK_DIM + (bj + TH_DIM)]; graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)]; } void block_FW_cuda() { // int round = padding_n / B; const int blocks = padding_n / BLOCK_DIM; dim3 block_dim(TH_DIM, TH_DIM, 1); dim3 phase3_grid(blocks, blocks, 1); // for (int k = 0; k < blocks; k++) { // floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda); // floyd_warshall_block_kernel_phase3<<<phase3_grid, block_dim>>>(pitch_k, k, Dist_cuda, 0, 0); // } const int row_size_pitchk = BLOCK_DIM * pitch_k; up_part_size_in_block = (blocks+1)/2; bottom_part_size_in_block = blocks/2; up_part_height = BLOCK_DIM * up_part_size_in_block; bottom_part_height = BLOCK_DIM * bottom_part_size_in_block; dim3 phase31_grid(blocks, up_part_size_in_block, 1); dim3 phase32_grid(blocks, bottom_part_size_in_block, 1); printf("Up Blocks: %d, Bottom Blocks: %d\n", up_part_size_in_block, bottom_part_size_in_block); for (int k = 0; k < blocks; k++) { int next_k = k + 1; // Phase 1 cudaSetDevice(0); floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda0); cudaSetDevice(1); floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda1); // Phase 2 cudaStream_t stream; cudaStreamCreate(&stream); cudaSetDevice(0); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda0); floyd_warshall_block_kernel_phase21<<<up_part_size_in_block, block_dim, 0>>>(pitch_k, k, Dist_cuda0, 0); cudaSetDevice(1); // floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda1); floyd_warshall_block_kernel_phase21<<<bottom_part_size_in_block, block_dim, 0>>>(pitch_k, k, Dist_cuda1, up_part_size_in_block); // Calculate rows of phase 2 if(k < up_part_size_in_block){ cudaSetDevice(0); floyd_warshall_block_kernel_phase22<<<blocks, block_dim, 0, stream>>>(pitch_k, k, Dist_cuda0, 0); }else{ cudaSetDevice(1); floyd_warshall_block_kernel_phase22<<<blocks, block_dim, 0, stream>>>(pitch_k, k, Dist_cuda1, 0); } cudaStreamDestroy(stream); // Phase 3 cudaSetDevice(0); floyd_warshall_block_kernel_phase3<<<phase31_grid, block_dim>>>(pitch_k, k, Dist_cuda0, 0, 0); cudaSetDevice(1); floyd_warshall_block_kernel_phase3<<<phase32_grid, block_dim>>>(pitch_k, k, Dist_cuda1, 0, up_part_size_in_block); // Transfer data to another GPU if(next_k < up_part_size_in_block){ // printf("Up K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks); cudaMemcpyPeer(&(Dist_cuda1[next_k * row_size_pitchk]), 1, &(Dist_cuda0[next_k * row_size_pitchk]), 0, SIZEOFINT * row_size_pitchk); }else if(next_k < blocks){ // printf("Down K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks); cudaMemcpyPeer(&(Dist_cuda0[next_k * row_size_pitchk]), 0, &(Dist_cuda1[next_k * row_size_pitchk]), 1, SIZEOFINT * 
row_size_pitchk); } } } int main(int argc, char* argv[]) { input(argv[1]); // show_mat(getDistAddr(0, 0), n); setup_DistCuda(); // printf("Vertice: %d, Edge: %d, B: %d, Padding: %d\n", n, m, BLOCK_DIM, padding_n); block_FW_cuda(); back_DistCuda(); // show_mat(getDistAddr(0, 0), n); output(argv[2]); // show_mat(getDistAddr(0, 0), n); return 0; }
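hw4-2_pitch_64 is a two-GPU blocked Floyd-Warshall: phase 1 updates the pivot block, phases 21/22 update the pivot column and row split between the devices, phase 3 updates all remaining blocks, and cudaMemcpyPeer ships the next pivot row to the other GPU at the end of each round. For checking results on small inputs, a plain one-thread-per-(i, j) relaxation is usually enough; the sketch below is such a reference (the kernel and function names are illustrative, it assumes the same INF = (1 << 30) - 1 convention, and it needs no padding to a multiple of BLOCK_DIM):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void fw_relax(int *d, int n, int k) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && j < n) {
        int via = d[i * n + k] + d[k * n + j];   // stays below INT_MAX even for two INFs
        if (via < d[i * n + j]) d[i * n + j] = via;
    }
}

// Runs the O(n^3) relaxation in place on a host matrix of size n x n.
void floyd_warshall_reference(int *h, int n) {
    int *d;
    size_t bytes = (size_t)n * n * sizeof(int);
    cudaMalloc(&d, bytes);
    cudaMemcpy(d, h, bytes, cudaMemcpyHostToDevice);
    dim3 block(16, 16);
    dim3 grid((n + 15) / 16, (n + 15) / 16);
    for (int k = 0; k < n; k++)
        fw_relax<<<grid, block>>>(d, n, k);   // pivots are serially dependent: one launch each
    cudaMemcpy(h, d, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d);
}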
ad89e8e40ee8f23dd98bfbaab60258fa7278c2cf.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdio.h> #include <hip/hip_runtime.h> #include <assert.h> #include <iostream> #include "dataio.h" using namespace std; #ifndef FT #define FT float #define FT2 float2 #define make_FT2 make_float2 #endif #ifndef IFT #define IFT float #endif #ifndef NN #define NN 4 #endif #ifndef BB #define BB 4 #endif #ifndef GPUID #define GPUID 0 #endif #define CUERR { hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \ return -1; }} // Block index #define bx blockIdx.x #define by blockIdx.y // Thread index #define tx threadIdx.x // Possible values are 2, 4, 8 and 16 #ifndef R #define R 2 #endif inline FT2 __device__ operator*( FT2 a, FT2 b ) { return make_FT2( a.x*b.x-a.y*b.y, a.x*b.y+a.y*b.x ); } inline FT2 __device__ operator+( FT2 a, FT2 b ) { return make_FT2( a.x + b.x, a.y + b.y ); } inline FT2 __device__ operator-( FT2 a, FT2 b ) { return make_FT2( a.x - b.x, a.y - b.y ); } inline FT2 __device__ operator*( FT2 a, FT b ) { return make_FT2( b*a.x , b*a.y); } #define COS_PI_8 0.923879533f #define SIN_PI_8 0.382683432f #define exp_1_16 make_FT2( COS_PI_8, -SIN_PI_8 ) #define exp_3_16 make_FT2( SIN_PI_8, -COS_PI_8 ) #define exp_5_16 make_FT2( -SIN_PI_8, -COS_PI_8 ) #define exp_7_16 make_FT2( -COS_PI_8, -SIN_PI_8 ) #define exp_9_16 make_FT2( -COS_PI_8, SIN_PI_8 ) #define exp_1_8 make_FT2( 1, -1 ) #define exp_1_4 make_FT2( 0, -1 ) #define exp_3_8 make_FT2( -1, -1 ) void inputData(FILE* fid, FT2* dat, int num_ft2) { assert(sizeof(FT) == 4 || sizeof(FT) == 8); if (fid == NULL) { fprintf(stderr, "Cannot open input file\n"); exit(-1); } for (unsigned int i = 0 ; i < num_ft2 ; i++) { IFT in_data; fread(&(in_data), sizeof(IFT), 1, fid); if (sizeof(FT) == 4) dat[i].x = (float) in_data; else // (sizeof(FT) == 8) dat[i].x = (double) in_data; fread(&(in_data), sizeof(IFT), 1, fid); if (sizeof(FT) == 4) dat[i].y = (float) in_data; else // (sizeof(FT) == 8) dat[i].y = (double) in_data; } } __device__ void GPU_FFT2( FT2 &v1,FT2 &v2 ) { FT2 v0 = v1; v1 = v0 + v2; v2 = v0 - v2; } __device__ void GPU_FFT4( FT2 &v0,FT2 &v1,FT2 &v2,FT2 &v3) { GPU_FFT2(v0, v2); GPU_FFT2(v1, v3); v3 = v3 * exp_1_4; GPU_FFT2(v0, v1); GPU_FFT2(v2, v3); } inline __device__ void GPU_FFT2(FT2* v){ GPU_FFT2(v[0],v[1]); } inline __device__ void GPU_FFT4(FT2* v){ GPU_FFT4(v[0],v[1],v[2],v[3] ); } inline __device__ void GPU_FFT8(FT2* v){ GPU_FFT2(v[0],v[4]); GPU_FFT2(v[1],v[5]); GPU_FFT2(v[2],v[6]); GPU_FFT2(v[3],v[7]); v[5]=(v[5]*exp_1_8)*M_SQRT1_2; v[6]=v[6]*exp_1_4; v[7]=(v[7]*exp_3_8)*M_SQRT1_2; GPU_FFT4(v[0],v[1],v[2],v[3]); GPU_FFT4(v[4],v[5],v[6],v[7]); } inline __device__ void GPU_FFT16( FT2 *v ) { GPU_FFT4( v[0], v[4], v[8], v[12] ); GPU_FFT4( v[1], v[5], v[9], v[13] ); GPU_FFT4( v[2], v[6], v[10], v[14] ); GPU_FFT4( v[3], v[7], v[11], v[15] ); v[5] = (v[5] * exp_1_8 ) * M_SQRT1_2; v[6] = v[6] * exp_1_4; v[7] = (v[7] * exp_3_8 ) * M_SQRT1_2; v[9] = v[9] * exp_1_16; v[10] = (v[10] * exp_1_8 ) * M_SQRT1_2; v[11] = v[11] * exp_3_16; v[13] = v[13] * exp_3_16; v[14] = (v[14] * exp_3_8 ) * M_SQRT1_2; v[15] = v[15] * exp_9_16; GPU_FFT4( v[0], v[1], v[2], v[3] ); GPU_FFT4( v[4], v[5], v[6], v[7] ); GPU_FFT4( v[8], v[9], v[10], v[11] ); 
GPU_FFT4( v[12], v[13], v[14], v[15] ); } __device__ int GPU_expand(int idxL, int N1, int N2 ){ return (idxL/N1)*N1*N2 + (idxL%N1); } __device__ void GPU_FftIteration(int j, int Ns, FT2* data0, FT2* data1){ FT2 v[R]; int idxS = j; FT angle = -2*M_PI*(j%Ns)/(Ns*R); for( int r=0; r<R; r++ ) { v[r] = data0[idxS+r*NN/R]; v[r] = v[r]*make_FT2(cos(r*angle), sin(r*angle)); } #if R == 2 GPU_FFT2( v ); #endif #if R == 4 GPU_FFT4( v ); #endif #if R == 8 GPU_FFT8( v ); #endif #if R == 16 GPU_FFT16( v ); #endif int idxD = GPU_expand(j,Ns,R); for( int r=0; r<R; r++ ){ data1[idxD+r*Ns] = v[r]; } } __global__ void GPU_FFT_Global(int Ns, FT2* data0, FT2* data1) { data0+=bx*NN; data1+=bx*NN; GPU_FftIteration( tx, Ns, data0, data1); } int main( int argc, char **argv ) { int n_bytes; // check config assert(NN > 0); assert(BB > 0); assert(R > 0); unsigned int nn = NN; unsigned int rr = R; while (nn > 0) { if (nn == 1) break; assert(nn / rr > 0); nn = nn / rr; } // get IO file names assert(argc == 3); char *inname = argv[1]; char *outname = argv[2]; // int N, B; n_bytes = NN*BB*sizeof(FT2); hipSetDevice(GPUID); FT2 *source; FT2 *result; hipHostMalloc((void**)&source, n_bytes); CUERR; hipHostMalloc((void**)&result, n_bytes); CUERR; FT2 *d_source, *d_work; hipMalloc((void**) &d_source, n_bytes); CUERR; hipMalloc((void**) &d_work, n_bytes); CUERR; FILE * infile = fopen(inname, "r"); assert(infile != NULL); fseek(infile, 0, SEEK_END); long fsize = ftell(infile); fseek(infile, 0, SEEK_SET); assert(fsize % (NN*BB*sizeof(IFT)*2) == 0); unsigned int n_repeats = fsize / (NN*BB*sizeof(IFT)*2); FILE *outfile = fopen(outname, "w"); assert(outfile != NULL); for (unsigned int ri = 0 ; ri < n_repeats ; ri++) { inputData(infile,(FT2*)source,NN*BB); // copy host memory to device hipMemcpy(d_source, source, n_bytes,hipMemcpyHostToDevice); CUERR; hipMemset(d_work, 0,n_bytes); CUERR; for( int Ns=1; Ns<NN; Ns*=R){ hipLaunchKernelGGL(( GPU_FFT_Global), dim3(dim3(BB)), dim3(dim3(NN/R)), 0, 0, Ns, d_source, d_work); FT2 *tmp = d_source; d_source = d_work; d_work = tmp; } hipMemcpy(result, d_source, n_bytes,hipMemcpyDeviceToHost); CUERR; writeOutput64to128(outfile, (FT)result[NN*BB-1].y); } hipFree(d_source); CUERR; hipFree(d_work); CUERR; hipHostFree(source); hipHostFree(result); fclose(infile); fclose(outfile); return 0; }
ad89e8e40ee8f23dd98bfbaab60258fa7278c2cf.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdio.h> #include <cuda.h> #include <assert.h> #include <iostream> #include "dataio.h" using namespace std; #ifndef FT #define FT float #define FT2 float2 #define make_FT2 make_float2 #endif #ifndef IFT #define IFT float #endif #ifndef NN #define NN 4 #endif #ifndef BB #define BB 4 #endif #ifndef GPUID #define GPUID 0 #endif #define CUERR { cudaError_t err; \ if ((err = cudaGetLastError()) != cudaSuccess) { \ printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \ return -1; }} // Block index #define bx blockIdx.x #define by blockIdx.y // Thread index #define tx threadIdx.x // Possible values are 2, 4, 8 and 16 #ifndef R #define R 2 #endif inline FT2 __device__ operator*( FT2 a, FT2 b ) { return make_FT2( a.x*b.x-a.y*b.y, a.x*b.y+a.y*b.x ); } inline FT2 __device__ operator+( FT2 a, FT2 b ) { return make_FT2( a.x + b.x, a.y + b.y ); } inline FT2 __device__ operator-( FT2 a, FT2 b ) { return make_FT2( a.x - b.x, a.y - b.y ); } inline FT2 __device__ operator*( FT2 a, FT b ) { return make_FT2( b*a.x , b*a.y); } #define COS_PI_8 0.923879533f #define SIN_PI_8 0.382683432f #define exp_1_16 make_FT2( COS_PI_8, -SIN_PI_8 ) #define exp_3_16 make_FT2( SIN_PI_8, -COS_PI_8 ) #define exp_5_16 make_FT2( -SIN_PI_8, -COS_PI_8 ) #define exp_7_16 make_FT2( -COS_PI_8, -SIN_PI_8 ) #define exp_9_16 make_FT2( -COS_PI_8, SIN_PI_8 ) #define exp_1_8 make_FT2( 1, -1 ) #define exp_1_4 make_FT2( 0, -1 ) #define exp_3_8 make_FT2( -1, -1 ) void inputData(FILE* fid, FT2* dat, int num_ft2) { assert(sizeof(FT) == 4 || sizeof(FT) == 8); if (fid == NULL) { fprintf(stderr, "Cannot open input file\n"); exit(-1); } for (unsigned int i = 0 ; i < num_ft2 ; i++) { IFT in_data; fread(&(in_data), sizeof(IFT), 1, fid); if (sizeof(FT) == 4) dat[i].x = (float) in_data; else // (sizeof(FT) == 8) dat[i].x = (double) in_data; fread(&(in_data), sizeof(IFT), 1, fid); if (sizeof(FT) == 4) dat[i].y = (float) in_data; else // (sizeof(FT) == 8) dat[i].y = (double) in_data; } } __device__ void GPU_FFT2( FT2 &v1,FT2 &v2 ) { FT2 v0 = v1; v1 = v0 + v2; v2 = v0 - v2; } __device__ void GPU_FFT4( FT2 &v0,FT2 &v1,FT2 &v2,FT2 &v3) { GPU_FFT2(v0, v2); GPU_FFT2(v1, v3); v3 = v3 * exp_1_4; GPU_FFT2(v0, v1); GPU_FFT2(v2, v3); } inline __device__ void GPU_FFT2(FT2* v){ GPU_FFT2(v[0],v[1]); } inline __device__ void GPU_FFT4(FT2* v){ GPU_FFT4(v[0],v[1],v[2],v[3] ); } inline __device__ void GPU_FFT8(FT2* v){ GPU_FFT2(v[0],v[4]); GPU_FFT2(v[1],v[5]); GPU_FFT2(v[2],v[6]); GPU_FFT2(v[3],v[7]); v[5]=(v[5]*exp_1_8)*M_SQRT1_2; v[6]=v[6]*exp_1_4; v[7]=(v[7]*exp_3_8)*M_SQRT1_2; GPU_FFT4(v[0],v[1],v[2],v[3]); GPU_FFT4(v[4],v[5],v[6],v[7]); } inline __device__ void GPU_FFT16( FT2 *v ) { GPU_FFT4( v[0], v[4], v[8], v[12] ); GPU_FFT4( v[1], v[5], v[9], v[13] ); GPU_FFT4( v[2], v[6], v[10], v[14] ); GPU_FFT4( v[3], v[7], v[11], v[15] ); v[5] = (v[5] * exp_1_8 ) * M_SQRT1_2; v[6] = v[6] * exp_1_4; v[7] = (v[7] * exp_3_8 ) * M_SQRT1_2; v[9] = v[9] * exp_1_16; v[10] = (v[10] * exp_1_8 ) * M_SQRT1_2; v[11] = v[11] * exp_3_16; v[13] = v[13] * exp_3_16; v[14] = (v[14] * exp_3_8 ) * M_SQRT1_2; v[15] = v[15] * exp_9_16; GPU_FFT4( v[0], v[1], v[2], v[3] ); GPU_FFT4( v[4], v[5], v[6], v[7] ); GPU_FFT4( v[8], v[9], v[10], v[11] ); GPU_FFT4( v[12], v[13], v[14], v[15] ); } __device__ int 
GPU_expand(int idxL, int N1, int N2 ){ return (idxL/N1)*N1*N2 + (idxL%N1); } __device__ void GPU_FftIteration(int j, int Ns, FT2* data0, FT2* data1){ FT2 v[R]; int idxS = j; FT angle = -2*M_PI*(j%Ns)/(Ns*R); for( int r=0; r<R; r++ ) { v[r] = data0[idxS+r*NN/R]; v[r] = v[r]*make_FT2(cos(r*angle), sin(r*angle)); } #if R == 2 GPU_FFT2( v ); #endif #if R == 4 GPU_FFT4( v ); #endif #if R == 8 GPU_FFT8( v ); #endif #if R == 16 GPU_FFT16( v ); #endif int idxD = GPU_expand(j,Ns,R); for( int r=0; r<R; r++ ){ data1[idxD+r*Ns] = v[r]; } } __global__ void GPU_FFT_Global(int Ns, FT2* data0, FT2* data1) { data0+=bx*NN; data1+=bx*NN; GPU_FftIteration( tx, Ns, data0, data1); } int main( int argc, char **argv ) { int n_bytes; // check config assert(NN > 0); assert(BB > 0); assert(R > 0); unsigned int nn = NN; unsigned int rr = R; while (nn > 0) { if (nn == 1) break; assert(nn / rr > 0); nn = nn / rr; } // get IO file names assert(argc == 3); char *inname = argv[1]; char *outname = argv[2]; // int N, B; n_bytes = NN*BB*sizeof(FT2); cudaSetDevice(GPUID); FT2 *source; FT2 *result; cudaMallocHost((void**)&source, n_bytes); CUERR; cudaMallocHost((void**)&result, n_bytes); CUERR; FT2 *d_source, *d_work; cudaMalloc((void**) &d_source, n_bytes); CUERR; cudaMalloc((void**) &d_work, n_bytes); CUERR; FILE * infile = fopen(inname, "r"); assert(infile != NULL); fseek(infile, 0, SEEK_END); long fsize = ftell(infile); fseek(infile, 0, SEEK_SET); assert(fsize % (NN*BB*sizeof(IFT)*2) == 0); unsigned int n_repeats = fsize / (NN*BB*sizeof(IFT)*2); FILE *outfile = fopen(outname, "w"); assert(outfile != NULL); for (unsigned int ri = 0 ; ri < n_repeats ; ri++) { inputData(infile,(FT2*)source,NN*BB); // copy host memory to device cudaMemcpy(d_source, source, n_bytes,cudaMemcpyHostToDevice); CUERR; cudaMemset(d_work, 0,n_bytes); CUERR; for( int Ns=1; Ns<NN; Ns*=R){ GPU_FFT_Global<<<dim3(BB), dim3(NN/R)>>>(Ns, d_source, d_work); FT2 *tmp = d_source; d_source = d_work; d_work = tmp; } cudaMemcpy(result, d_source, n_bytes,cudaMemcpyDeviceToHost); CUERR; writeOutput64to128(outfile, (FT)result[NN*BB-1].y); } cudaFree(d_source); CUERR; cudaFree(d_work); CUERR; cudaFreeHost(source); cudaFreeHost(result); fclose(infile); fclose(outfile); return 0; }
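The FFT pair above runs log_R(NN) kernel launches; each GPU_FftIteration multiplies its R inputs by twiddle factors, applies an R-point butterfly, and scatters the results with GPU_expand. For small NN the output can be sanity-checked against a naive O(N^2) DFT such as the host-side sketch below (the function name is illustrative, and whether a reordering pass is still needed before the comparison depends on the stage layout):

#include <cuda_runtime.h>   // for float2 / make_float2
#include <math.h>

// Naive O(N^2) forward DFT over float2 complex samples.
static void dft_reference(const float2 *in, float2 *out, int N) {
    const float PI = 3.14159265358979f;
    for (int k = 0; k < N; k++) {
        float re = 0.0f, im = 0.0f;
        for (int j = 0; j < N; j++) {
            float ang = -2.0f * PI * (float)k * (float)j / (float)N;   // e^{-i 2 pi k j / N}
            re += in[j].x * cosf(ang) - in[j].y * sinf(ang);
            im += in[j].x * sinf(ang) + in[j].y * cosf(ang);
        }
        out[k] = make_float2(re, im);
    }
}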
fca643400426332964f70efbca79b20ad8875eb8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// GPU kernel for convoluting sine and cosine multiplication data with filter coefficients with hamming window ....
__global__ void conv(float *dev_op_sine, float *dev_op_cosine, float *dev_op_sine_conv,
                     float *dev_op_cosine_conv, float *dev_lpf_hamming, int b, int windowLength){
    int i, k, l;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    float temp1, temp2;

    for(i = idx; i < b; i += stride){
        temp1 = 0;
        temp2 = 0;
        for(k = 0; k < windowLength; k++){
            l = windowLength - k;
            temp1 += dev_op_sine[i+l] * dev_lpf_hamming[k];
            temp2 += dev_op_cosine[i+l] * dev_lpf_hamming[k];
        }
        dev_op_sine_conv[i] = temp1;
        dev_op_cosine_conv[i] = temp2;
    }
}
fca643400426332964f70efbca79b20ad8875eb8.cu
// GPU kernel for convoluting sine and cosine multiplication data with filter coefficients with hamming window ....
__global__ void conv(float *dev_op_sine, float *dev_op_cosine, float *dev_op_sine_conv,
                     float *dev_op_cosine_conv, float *dev_lpf_hamming, int b, int windowLength){
    int i, k, l;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    float temp1, temp2;

    for(i = idx; i < b; i += stride){
        temp1 = 0;
        temp2 = 0;
        for(k = 0; k < windowLength; k++){
            l = windowLength - k;
            temp1 += dev_op_sine[i+l] * dev_lpf_hamming[k];
            temp2 += dev_op_cosine[i+l] * dev_lpf_hamming[k];
        }
        dev_op_sine_conv[i] = temp1;
        dev_op_cosine_conv[i] = temp2;
    }
}
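The conv kernel above FIR-filters the sine and cosine mixer outputs with windowLength Hamming-windowed taps, using a grid-stride loop over the b output samples. One sizing detail a caller has to respect is that the inputs are read at index i + l with l up to windowLength, so they must hold at least b + windowLength samples. A minimal launch sketch follows (run_conv and the launch shape are illustrative, and the kernel is assumed to be in the same translation unit):

#include <cuda_runtime.h>

// Declaration of the kernel above, assumed to be defined in this translation unit.
__global__ void conv(float *dev_op_sine, float *dev_op_cosine,
                     float *dev_op_sine_conv, float *dev_op_cosine_conv,
                     float *dev_lpf_hamming, int b, int windowLength);

int run_conv(const float *h_sine, const float *h_cosine, const float *h_taps,
             float *h_sine_out, float *h_cosine_out, int b, int windowLength) {
    size_t in_bytes  = (size_t)(b + windowLength) * sizeof(float);  // padded input length
    size_t out_bytes = (size_t)b * sizeof(float);
    size_t tap_bytes = (size_t)windowLength * sizeof(float);
    float *d_sine, *d_cosine, *d_sine_out, *d_cosine_out, *d_taps;
    cudaMalloc(&d_sine, in_bytes);      cudaMalloc(&d_cosine, in_bytes);
    cudaMalloc(&d_sine_out, out_bytes); cudaMalloc(&d_cosine_out, out_bytes);
    cudaMalloc(&d_taps, tap_bytes);
    cudaMemcpy(d_sine, h_sine, in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_cosine, h_cosine, in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_taps, h_taps, tap_bytes, cudaMemcpyHostToDevice);
    int threads = 256;
    int blocks = (b + threads - 1) / threads;   // grid-stride loop tolerates any grid size
    conv<<<blocks, threads>>>(d_sine, d_cosine, d_sine_out, d_cosine_out,
                              d_taps, b, windowLength);
    cudaMemcpy(h_sine_out, d_sine_out, out_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_cosine_out, d_cosine_out, out_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_sine); cudaFree(d_cosine); cudaFree(d_sine_out);
    cudaFree(d_cosine_out); cudaFree(d_taps);
    return 0;
}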
892040136f130148518b9dfef24d839002468227.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include <algorithm> #include <unordered_map> #include <utility> #include <vector> namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetCUDADeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads; for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads; #ifdef PADDLE_WITH_HIP if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge wheater has nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; __syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = 
static_cast<T>(0.0); // Todo(wangxi). simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<platform::CUDADeviceContext>::apply( typename std::enable_if<std::is_floating_point<T>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = BOOST_GET_CONST(platform::CUDAPlace, tensor_.place()).device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc(*dev_ctx, op_var.length() + 1); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } const size_t threads = 1024; size_t blocks = ::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else hipLaunchKernelGGL(( CheckNanInfKernel), dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<platform::CUDADeviceContext>(const std::string& op_type, const std::string& var_name, const framework::Tensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name, tensor, place); VisitDataType(tensor.type(), vistor); } } // namespace details } // namespace framework } // namespace paddle
892040136f130148518b9dfef24d839002468227.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include <algorithm> #include <unordered_map> #include <utility> #include <vector> namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetCUDADeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads(); for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads(); #ifdef PADDLE_WITH_HIP if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge whether there is nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; __syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = static_cast<T>(0.0); // Todo(wangxi).
simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<platform::CUDADeviceContext>::apply( typename std::enable_if<std::is_floating_point<T>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = BOOST_GET_CONST(platform::CUDAPlace, tensor_.place()).device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc(*dev_ctx, op_var.length() + 1); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_CUDA_SUCCESS( hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_CUDA_SUCCESS( cudaMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, cudaMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } const size_t threads = 1024; size_t blocks = std::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else CheckNanInfKernel<<<blocks, threads, 0, dev_ctx->stream()>>>( tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<platform::CUDADeviceContext>(const std::string& op_type, const std::string& var_name, const framework::Tensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name, tensor, place); VisitDataType(tensor.type(), vistor); } } // namespace details } // namespace framework } // namespace paddle
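// --- Illustrative sketch (not part of the file above; the tensor size is an example value): the
// launch shape used by TensorCheckerVisitor caps the grid at 128 blocks of 1024 threads and relies
// on the kernel's grid-stride loop to cover any numel. Plain host C++ model of that indexing.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t threads = 1024;
  const size_t numel = 1 << 20;  // example tensor size
  const size_t blocks = std::min(static_cast<size_t>(128),
                                 static_cast<size_t>((numel + threads - 1) / threads));
  const size_t stride = blocks * threads;  // total threads launched in the grid
  // Grid-stride traversal: logical thread `tid` visits tid, tid + stride, tid + 2*stride, ...
  size_t visited = 0;
  for (size_t tid = 0; tid < stride; ++tid)
    for (size_t i = tid; i < numel; i += stride) ++visited;
  std::printf("blocks=%zu stride=%zu visited=%zu (equals numel)\n", blocks, stride, visited);
  return 0;
}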
2b876f02dd0fc9cd73cd097f9be0c92df933dbd3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // CurveTracing // #include "CurveTracing.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "Template.h" #include "TemplateFactory.h" // CURVE_VALUE( // #define CURVE_VALUE 1000 // DEF_BLOCK_X DEF_BLOCK_Y // #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel _traverseKer // // // 1 // 3 // static __global__ void // Kernel _traverseKer( ImageCuda inimg, // ImageCuda outimg, // int *array1_dev, // int *array2_dev, // Template boxtpl // 3 * 3 ); // Kernel _traverseKerNew // 0 CPU // static __global__ void // Kernel _traverseKerNew( ImageCuda inimg, // int *array1_dev // ); // Host traverse // // 1 // 3 // // CPU static __host__ void // traverse( DynamicArrays &Vertex, // DynamicArrays &Intersect, // Image *inimg, // Image *outimg, // int *tpl // ); // Host traverseNew // 0 CPU // static __host__ void // traverseNew( DynamicArrays &array, // Image *inimg // ); // Host getCurve // static __host__ void // getCurve( DynamicArrays *pcurve, // int &test, // int count, // Image *img, // int *mark, // // 0 1 int *tpl, // int Vx, // x int Vy // y ); // Host insectClassify // // // static __host__ void // insectClassify( int x, // x int y, // y DynamicArrays &Intersect, // DynamicArrays *insect, // int sectnum, // int *tpl // ); // Host makeCur // // x y 45 // x y static __host__ void // makeCur( DynamicArrays &cur, // int dx1, // x int dy1, // y int dx2, // x int dy2 // y ); // Host interAssemble // // static __host__ void // interAssemble( DynamicArrays *pcurve, // int count, // DynamicArrays *insect, // int sectnum, // DynamicArrays realsect, // int *tpl // ); // Host bpConnect // static __host__ void bpConnect( DynamicArrays *pcurve, // int count, // int radius, // DynamicArrays *psect, // int *pcount, // DynamicArrays &Vertex // ); // Host AidNorm // // static __host__ bool // bool true // false AidNorm( DynamicArrays *pcurve, // int i, // i int count, // DynamicArrays *psect, // int pcount, // int radius, // DynamicArrays &Vertex, // int x, int y // ); // Host pcurveAcord // // static __host__ int // 0 // 1 -1 pcurveAcord( DynamicArrays *pcurve, // int count, // int &location, // int x, int y // ); // Host verAssemble // // // // // static __host__ void verAssemble( DynamicArrays *pcurve, // int count, // DynamicArrays *psect, // int pcount, // DynamicArrays &realsect // ); // Host IsFindPoint static __host__ bool // bool true // false IsFindPoint( DynamicArrays &array, // int x, int y // ); // Host makeNode // // static __host__ void // makeNode( DynamicArrays *pcurve, // int count, // DynamicArrays *pcurno // ); // Host openCurvePath // start end static __host__ void // openCurvePath( DynamicArrays *opencurnode, // int *openNum, // Graph *G, // int start, // int end // ); // Host closeCurvePath // start end static __host__ void // closeCurvePath( DynamicArrays *closecurnode, // int *closeNum, // Graph *G, // int insect // ); // Host IsArrayEqual // // // // static __host__ bool // bool true // false IsArrayEqual( DynamicArrays object1, // 1 DynamicArrays object2 // 2 ); // Host getPointNo // static __host__ void // getPointNo( DynamicArrays *pcurve, // int count, // DynamicArrays *pcurno, // DynamicArrays &array, // DynamicArrays &arrayno // ); // Host getCurveNonFormat // static __host__ void // getCurveNonFormat( DynamicArrays *curnode, // DynamicArrays *pcurve, // int count, // DynamicArrays *pcurno, // DynamicArrays *cur, // int num, // bool close = false // ); 
// Host traverse static __host__ void traverse(DynamicArrays &Vertex, DynamicArrays &Intersect, Image *inimg, Image *outimg,int *tpl) { // int i, j, k; // int dx, dy; // 0 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 0 if (inimg->imgData[i * inimg->width + j] == 0) { outimg->imgData[i * inimg->width + j] = 0; continue; } // 0 int m = 0; // 0 0 int flag = 0; for(k = 0; k < 8; k++) { dx = j + tpl[m++]; dy = i + tpl[m++]; // 0 flag if (dx >= 0 && dx < inimg->width && dy >= 0 && dy < inimg->height) { if (inimg->imgData[dy * inimg->width + dx] != 0) { flag++; } } } // flag 0 0 // 0 if (flag == 0) { outimg->imgData[i * inimg->width + j] = 0; // flag 1 0 // } else if (flag == 1) { Vertex.addElem(j); Vertex.addElem(i); outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; // flag 3 // 0 } else if (flag >= 3) { Intersect.addElem(j); Intersect.addElem(i); outimg->imgData[i * inimg->width + j] = 0; // flag 2, // } else { outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; } } } } // Host traverseNew static __host__ void traverseNew(DynamicArrays &array, Image *inimg) { // int i, j; // 0 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 0 if (inimg->imgData[i * inimg->width + j] != 0) { // 0 array.addElem(j); array.addElem(i); } } } } // Host getCurve static __host__ void getCurve(DynamicArrays *pcurve, int &test, int count, Image *img, int *mark, int *tpl, int Vx, int Vy) { // test 1 // if (mark[Vy * img->width + Vx] == 1) { test++; return; } // int dx, dy; int j = 0; // // 0 int flag = 0; // count pcurve[count].addElem(Vx); pcurve[count].addElem(Vy); mark[Vy * img->width + Vx] = 1; // for(int i = 0; i < 8; i++) { dx = Vx + tpl[j++]; dy = Vy + tpl[j++]; // 0 flag 1 if (img->imgData[dy * img->width + dx] != 0 && mark[dy * img->width + dx] != 1) { flag = 1; break; } } // flag 1 if (flag == 1) { getCurve(pcurve, test, count, img, mark, tpl, dx, dy); } // return; } // Host insectClassify static __host__ void insectClassify(int x, int y, DynamicArrays &Intersect, DynamicArrays *insect, int sectnum, int *tpl) { // xy insect[sectnum - 1].addElem(x); insect[sectnum - 1].addElem(y); // xy Intersect.delElem(x, y); // if (Intersect.getSize() == 0) return; // int dx, dy; for(int i = 0; i < 16; i += 2) { dx = x + tpl[i]; dy = y + tpl[i + 1]; // for(int j = 0; j < Intersect.getSize(); j += 2) { if (dx == Intersect[j] && dy == Intersect[j + 1]) { insectClassify(dx, dy, Intersect, insect, sectnum, tpl); } } } // return; } // Host makeCur static __host__ void makeCur(DynamicArrays &cur, int dx1, int dy1, int dx2, int dy2) { // int x, y; // cur.addElem(dx1); cur.addElem(dy1); // if (dx1 == dx2 && dy1 == dy2) return; // int m = dx1 - dx2, n = dy1 - dy2; // x = dx1; y = dy1; // // // m >= 0 n >= 0 if (m >= 0 && n >= 0) { // int d = m - n; // if (d >= 0) { for (int c = 0; c < n; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // m >= 0 n < 0 } else if (m >= 0 && n < 0) { n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); 
cur.addElem(y); } } // m < 0 n >= 0 } else if (m < 0 && n >= 0) { m = -m; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // m < 0 n < 0 } else { m = -m; n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); cur.addElem(y); } } } } // Host interAssemble static __host__ void interAssemble(DynamicArrays *pcurve, int count, DynamicArrays *insect, int sectnum, DynamicArrays realsect, int *tpl) { // if (realsect.getSize() == 0) return; // int i, j, k, x1, y1, x2, y2, dx1, dy1, dx2, dy2, num, num2; int mark1, mark2, flag1, flag2; // // for(i = 0; i < count; i++) { // flag1 = 0; flag2 = 0; // -1 mark1 = -1; mark2 = -1; // num = pcurve[i].getSize(); // x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; // for (j = 0; j < 16; j += 2) { // dx1 = x1 + tpl[j]; dy1 = y1 + tpl[j + 1]; // for (k = 0; k < sectnum; k++) { // num2 = insect[k].getSize(); // for (int m = 0; m < num2; m += 2) { if (dx1 == insect[k][m] && dy1 == insect[k][m + 1]) { mark1 = k; flag1 += 1; break; } } // if (flag1) { break; } } // if (flag1) { break; } } // for (j = 0; j < 16; j += 2) { // dx2 = x2 + tpl[j]; dy2 = y2 + tpl[j + 1]; // for (k = 0; k < sectnum; k++) { // num2 = insect[k].getSize(); // for (int m = 0; m < num2; m += 2) { if (dx2 == insect[k][m] && dy2 == insect[k][m + 1]) { mark2 = k; flag2 += 1; break; } } // if (flag2) { break; } } // if (flag2) { break; } } // if (mark1 < 0 && mark2 < 0) { continue; } // // if (mark1 >= 0 && mark2 < 0) { // pcurve[i].reverse(); // DynamicArrays temp; makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); // // } else if (mark1 < 0 && mark2 >= 0) { // DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // // // } else { // DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // temp.clear(); // pcurve[i].reverse(); // makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); } } } // Host pcurveAcord static __host__ int pcurveAcord(DynamicArrays *pcurve, int count, int &location, int x, int y) { // int i, dx1, dy1, dx2, dy2; // location for (i = 0; i < count; i++) { // dx1 = pcurve[i][0]; dy1 = pcurve[i][1]; dx2 = pcurve[i][pcurve[i].getSize() - 2]; dy2 = pcurve[i][pcurve[i].getSize() - 1]; // // 0 1 if ((dx1 == x) && (dy1 == y)) { location = i; return 0; } if ((dx2 == x) && (dy2 == y)) { location = i; return 1; } } // -1 return -1; } // Host verAssemble static __host__ void verAssemble(DynamicArrays *pcurve, int count, DynamicArrays *psect, int pcount, DynamicArrays &realsect) { // int i, j, dx, dy, mark, location; int cen_x, cen_y; // for (i = 0; i < pcount; i++) { cen_x = 0; cen_y = 0; for (j = 0; j < psect[i].getSize();) { cen_x += psect[i][j++]; cen_y += psect[i][j++]; } // cen_x = cen_x * 2 / j; cen_y = cen_y * 2 / j; realsect.addTail(cen_x, cen_y); // for (j = 0; j < psect[i].getSize();) { dx = 
psect[i][j++]; dy = psect[i][j++]; if ((mark = pcurveAcord(pcurve, count, location, dx, dy)) != -1) { if(!mark) { pcurve[location].reverse(); } DynamicArrays temp; makeCur(temp, dx, dy, cen_x, cen_y); temp.delElemXY(dx, dy); pcurve[location].addArray(temp); } } } } // Host IsFindPoint static __host__ bool IsFindPoint(DynamicArrays &array, int x, int y) { // for (int i = 0; i < array.getSize(); i += 2) { // true if (array[i] == x && array[i + 1] == y) return true; } // false return false; } // Host AidNorm static __host__ bool AidNorm(DynamicArrays *pcurve, int i, int count, DynamicArrays *psect, int pcount, int radius, DynamicArrays &Vertex, int x, int y) { // int j, dx1, dy1, dx2, dy2; int dis1, dis2; bool mark1, mark2; bool find = false; // i for (j = i + 1; j < count; j++) { // dx1 = pcurve[j][0]; dy1 = pcurve[j][1]; dx2 = pcurve[j][pcurve[j].getSize() - 2]; dy2 = pcurve[j][pcurve[j].getSize() - 1]; mark1 = false; mark2 = false; // i radius if (IsFindPoint(Vertex, dx1, dy1)) { // dis1 = (int)floor(sqrt((dx1 - x) * (dx1 - x) + (dy1 - y) * (dy1 - y))); if (dis1 <= radius) { mark1 = true; } } // i radius if(IsFindPoint(Vertex, dx2, dy2)) { // dis2 = (int)floor(sqrt((dx2 - x) * (dx2 - x) + (dy2 - y) * (dy2 - y))); if (dis2 <= radius) { mark2 = true; } } // i if (mark1 && mark2) { if (dis1 <= dis2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); } else { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); } find = true; } else if (mark1 && !mark2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); find = true; } else if (!mark1 && mark2) { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); find = true; } } // find return find; } // Host bpConnect static __host__ void bpConnect(DynamicArrays *pcurve, int count, int radius, DynamicArrays *psect, int *pcount, DynamicArrays &Vertex) { // int i, num; int x1, y1, x2, y2; bool find; // 0 *pcount = 0; // for (i = 0; i < count - 1; i++) { num = pcurve[i].getSize(); // x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; find = false; // if (IsFindPoint(Vertex, x1, y1)) { // find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x1, y1); // *pcount // if (find) { Vertex.delElem(x1, y1); psect[*pcount].addTail(x1, y1); *pcount += 1; } } find = false; // if (IsFindPoint(Vertex, x2, y2)) { // find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x2, y2); // *pcount // if (find) { Vertex.delElem(x2, y2); psect[*pcount].addTail(x2, y2); *pcount += 1; } } } } // Host makeNode static __host__ void makeNode(DynamicArrays *pcurve, int count, DynamicArrays *pcurno) { // int num1 = 0, num2 = 1, num = 0; int i, j, size1, size2; int x1, y1, x2, y2; // bool bool find1, find2; // 0 1 pcurno[0].addTail(0, 1); // 2 num = 2; // for (i = 1; i < count; i++) { // find1 = find2 = false; // size2 = pcurve[i].getSize(); // for (j = i - 1; j >= 0; j--) { // size1 = pcurve[j].getSize(); // x1 = pcurve[j][0]; y1 = pcurve[j][1]; x2 = pcurve[j][size1 - 2]; y2 = pcurve[j][size1 - 1]; // if (pcurve[i][0] == x1 && pcurve[i][1] == y1) { num1 = pcurno[j][0]; find1 = true; } else if (pcurve[i][0] == x2 && pcurve[i][1] == y2) { num1 = pcurno[j][1]; find1 = true; } // if (pcurve[i][size2 - 2] == x1 && pcurve[i][size2 - 1] == y1) { num2 = pcurno[j][0]; find2 = true; } else if (pcurve[i][size2 - 2] == x2 && pcurve[i][size2 - 1] == y2) { num2 = pcurno[j][1]; find2 = true; } } // if (find1 && find2) { pcurno[i].addTail(num1, num2); // } else if (find1 && !find2) { pcurno[i].addTail(num1, num); 
num++; // } else if (!find1 && find2) { pcurno[i].addTail(num, num2); num++; // } else { pcurno[i].addTail(num, num + 1); num += 2; } } // for (i = 0; i < count; i++) { pcurno[i].addElem(num++); } } // Host openCurvePath static __host__ void openCurvePath(DynamicArrays *opencurnode, int *openNum, Graph *G, int start, int end) { // DynamicArrays edgestack, vexstack; // int vtop = -1, etop = -1; // int vstacksize, estacksize; // int curnode; // Edge *cur; // vexstack.addElem(start); // G->resetCurrent(); // while (vexstack.getSize() != 0) { // vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // if (vexstack[vstacksize - 1] == end) { // for (int i = 0; i < estacksize; i++) { opencurnode[*openNum].addTail(vexstack[i], edgestack[i]); } // opencurnode[*openNum].addElem(end); // 1 *openNum += 1; // vexstack.delTail(vtop); edgestack.delTail(etop); // } else { // curnode = vexstack[vstacksize - 1]; // cur = G->vertexlist[curnode].current; // if (cur != NULL) { // // // if (!edgestack.findElem(cur->eno) && !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } G->vertexlist[curnode].current = cur->link; // // } else { vexstack.delTail(vtop); edgestack.delTail(etop); // if (vtop == start) break; // G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } } // Host closeCurvePath static __host__ void closeCurvePath(DynamicArrays *closecurnode, int *closeNum, Graph *G, int insect) { // DynamicArrays edgestack, vexstack; // int vtop = -1, etop = -1; // int vstacksize, estacksize; // int curnode; // bool isFind; // Edge *cur; // vexstack.addElem(insect); // int num = *closeNum; // G->resetCurrent(); while (vexstack.getSize() != 0) { // vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // isFind false isFind = false; // if (estacksize != 0 && vexstack[vstacksize - 1] == insect) { for (int i = 0; i < estacksize; i++) { closecurnode[num].addTail(vexstack[i], edgestack[i]); } closecurnode[num].addElem(insect); // for (int j = 0; j < num; j++) { if (IsArrayEqual(closecurnode[j], closecurnode[num])) { isFind = true; break; } } // if (isFind) { closecurnode[num].clear(); // 1 } else { num++; } // vexstack.delTail(vtop); edgestack.delTail(etop); // } else { // curnode = vexstack[vstacksize - 1]; // cur = G->vertexlist[curnode].current; // if (cur != NULL) { // // if (!edgestack.findElem(cur->eno)) { if ((cur->jvex == insect)|| !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } } G->vertexlist[curnode].current = cur->link; // // } else { vexstack.delTail(vtop); edgestack.delTail(etop); // if (vtop == insect) break; // G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } // *closeNum = num; } // Host IsArrayEqual static __host__ bool IsArrayEqual(DynamicArrays object1, DynamicArrays object2) { // false if (object1.getSize() != object2.getSize()) { return false; // true false // } else { // int size = object1.getSize(); // int *p = object1.getCrvDatap(); // int temp; // int min; // for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (p[j] < p[min]) { min = j; } } // if (min != i) { temp = p[i]; p[i] = p[min]; p[min] = temp; } } // int *q = object2.getCrvDatap(); // for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (q[j] < q[min]) { min = j; } } // if (min != i) { temp = q[i]; q[i] = q[min]; q[min] = temp; } } // false for (int i = 0; i < size - 1; i++) { if (p[i] != q[i]) { return false; } } // true 
return true; } } // Host getPointNo static __host__ void getPointNo(DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays &array, DynamicArrays &arrayno) { // int i, j; // int dx, dy; // for (i = 0; i < array.getSize();) { // xy dx = array[i++]; dy = array[i++]; // for (j = 0; j < count; j++) { // if (dx == pcurve[j][0] && dy == pcurve[j][1]) { arrayno.addElem(pcurno[j][0]); break; // } else if (dx == pcurve[j][pcurve[j].getSize() - 2] && dy == pcurve[j][pcurve[j].getSize() - 1]) { arrayno.addElem(pcurno[j][1]); break; } } } } // Host getCurveNonFormat static __host__ void getCurveNonFormat(DynamicArrays *curnode, DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays *cur, int num, bool close) { // int nodesize; // int inode; // int vnum = pcurno[count - 1][2] - count + 1; // int icur; // int i, j; // int xtop, ytop; // for (i = 0; i < num; i++) { // nodesize = curnode[i].getSize(); // for (j = 0; j < nodesize;) { // inode = curnode[i][j++]; // if (j >= nodesize) break; // icur = curnode[i][j++] - vnum; // if (inode == pcurno[icur][0]) { cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } } else if (inode == pcurno[icur][1]) { pcurve[icur].reverse(); cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } pcurve[icur].reverse(); } } // if (close) { // cur[i].delTail(ytop); cur[i].delTail(xtop); } } } // Host freeCurve void freeCurve(Curve ***curveList, int count) { if (curveList == NULL) return; // for (int i = 0; i < count; i++) { CurveBasicOp::deleteCurve((*curveList)[i]); } delete [](*curveList); } // Kernel _traverseKer // static __global__ void _traverseKer(ImageCuda inimg, ImageCuda outimg, int *array1_dev, int *array2_dev, Template boxtpl) { // c r // x y c columnr row // 4 4 // r 4 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // // if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // int inidx = r * inimg.pitchBytes + c; // int outidx = r * inimg.imgMeta.width + c; // 0 if (inimg.imgMeta.imgData[inidx] == 0) { outimg.imgMeta.imgData[inidx] = 0; return; } int tmpidx; // int count = 0; // 0 int dx, dy; // int *p = boxtpl.tplData; // // 0 for (int i = 0; i < boxtpl.count; i++) { // x y // dx = c + *(p++); dy = r + *(p++); // if (dx == c && dy == r) continue; // tmpidx = dy * inimg.pitchBytes + dx; // 8 if (inimg.imgMeta.imgData[tmpidx] != 0) { count++; } } // count 0 0 // 0 if (count == 0) { outimg.imgMeta.imgData[inidx] = 0; return; // flag 3 // 0 } else if (count >= 3) { array2_dev[2 * outidx] = c; array2_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = 0; // count 1 0 // } else if (count == 1) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; // flag 2, // } else { outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; } } // Kernel _traverseKerNew static __global__ void _traverseKerNew(ImageCuda inimg, int *array1_dev) { // c r // x y c columnr row // 4 4 // r 4 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // // if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // int inidx = r * inimg.pitchBytes + c; // int outidx = r * inimg.imgMeta.width + c; // 0 if (inimg.imgMeta.imgData[inidx] != 0) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; } } // FAIL_CURVETRACING_FREE // #define FAIL_CURVETRACING_FREE do { \ if (outimg1 != 
NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (tmpdev != NULL) { \ hipFree(tmpdev); \ tmpdev = NULL; \ } \ if (array1 != NULL) { \ delete []array1; \ array1 = NULL; \ } \ if (array2 != NULL) { \ delete []array2; \ array2 = NULL; \ } \ if (boxtpl != NULL) { \ TemplateFactory::putTemplate(boxtpl); \ boxtpl = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (psect != NULL) { \ delete []psect; \ psect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host curveTracing // __host__ int CurveTracing::curveTracing(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // if (inimg == NULL || curveList == NULL) return NULL_POINTER; // int errcode; hipError_t cuerrcode; // 1 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // tmpdev int *tmpdev = NULL; // CPU int *array1 = NULL; int *array2 = NULL; // boxtpl Template *boxtpl = NULL; // int *mark = NULL; // DynamicArrays *pcurve = NULL; // DynamicArrays *insect = NULL; // DynamicArrays *psect = NULL; // ; DynamicArrays *pcurno = NULL; // DynamicArrays *opencur = NULL; // DynamicArrays *closecur = NULL; // Graph *G = NULL; // ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // Device errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // ROI ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 1 ROI ImageCuda outsubimgCud1; errcode = ImageBasicOp::roiSubImage(outimg1, &outsubimgCud1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 2 ROI ImageCuda outsubimgCud2; errcode = ImageBasicOp::roiSubImage(outimg2, &outsubimgCud2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // int i, j, k; // int num1 = 0, num2 = 0; // int dx, dy; // int arraysize = inimg->width * inimg->height * 2; int datasize = arraysize * 2 * sizeof(int); // cuerrcode = hipMalloc((void **)(&tmpdev), datasize); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // -1 cuerrcode = hipMemset(tmpdev, -1, datasize); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // int *array1_dev = tmpdev; int *array2_dev = tmpdev + arraysize; // dim3 boxsize(3, 3, 1); // errcode = TemplateFactory::getTemplate(&boxtpl, TF_SHAPE_BOX, boxsize, NULL); // NULL NULL if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // Device errcode = TemplateBasicOp::copyToCurrentDevice(boxtpl); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // Kernel dim3 blocksize, 
gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // Kernel hipLaunchKernelGGL(( _traverseKer), dim3(gridsize), dim3(blocksize), 0, 0, insubimgCud, outsubimgCud1, array1_dev, array2_dev, *boxtpl); if (hipGetLastError() != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // CPU array1 = new int[arraysize]; array2 = new int[arraysize]; // Host cuerrcode = hipMemcpy(array1, array1_dev, arraysize * sizeof (int), hipMemcpyDeviceToHost); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } cuerrcode = hipMemcpy(array2, array2_dev, arraysize * sizeof (int), hipMemcpyDeviceToHost); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // DynamicArrays Vertex, Intersect; // -1 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex.addElem(array1[i]); } } for (i = 0; i < arraysize; i++) { if (array2[i] != -1) { Intersect.addElem(array2[i]); } } // num1 = Vertex.getSize(); num2 = Intersect.getSize(); // // // if ((num1 && num2) || (num1 && !num2)) { // -1 cuerrcode = hipMemset(tmpdev, -1, datasize); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // hipLaunchKernelGGL(( _traverseKer), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (hipGetLastError() != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // Host cuerrcode = hipMemcpy(array1, array1_dev, arraysize * sizeof (int), hipMemcpyDeviceToHost); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // Device errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // mark = new int[arraysize / 2]; // 0 memset(mark, 0, sizeof(int) * arraysize / 2); // count int count = 0; // getCurve int test = 0; // pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // getCurve for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // int x, y; // int sectnum = 0; // insect = new DynamicArrays [num2 / 2]; // while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // // DynamicArrays realsect; // for (i = 0; i < sectnum; i++) { // 0 int maxvalue = 0; // int insect_x = insect[i][0], insect_y = insect[i][1]; // for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // realsect.addElem(insect_x); realsect.addElem(insect_y); } // pcurve interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // int pcount = 0; // psect = new DynamicArrays[Vertex.getSize() / 2]; // bpConnect(pcurve, count, radius, psect, &pcount, Vertex); // verAssemble(pcurve, count, psect, pcount, realsect); // pcurno = new DynamicArrays[count]; // makeNode(pcurve, count, pcurno); // int edgenum = count; // int vexnum = pcurno[count - 1][2] - edgenum + 1; // G = new 
Graph(vexnum, edgenum); // for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // DynamicArrays vertexno; DynamicArrays intersectno; // getPointNo(pcurve, count, pcurno, Vertex, vertexno); // if (realsect.getSize() > 0) getPointNo(pcurve, count, pcurno, realsect, intersectno); // 0 *openNum = 0; *closeNum = 0; // for (i = 0; i < vertexno.getSize(); i++) { // int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // openCurvePath(opencurnode, openNum, G, start, end); } } // for (i = 0; i < intersectno.getSize(); i++) { // closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // opencur = new DynamicArrays[*openNum]; // closecur = new DynamicArrays[*closeNum]; // getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // int total = *openNum + *closeNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < *openNum; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM; } // curveLength = (size_t)(opencur[i].getSize() / 2); // crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } // for (; i < total; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // else if (!num1 && num2) { // -1 cuerrcode = hipMemset(tmpdev, -1, datasize); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // hipLaunchKernelGGL(( _traverseKer), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (hipGetLastError() != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // Host cuerrcode = hipMemcpy(array1, array1_dev, arraysize * sizeof (int), hipMemcpyDeviceToHost); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // Device errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // mark = new int[arraysize / 2]; // 0 memset(mark, 0, sizeof(int) * arraysize / 2); // count int count = 0; // getCurve int test = 0; // pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // getCurve for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // int x, y; // int sectnum = 0; // insect = new DynamicArrays [num2 / 2]; // while 
(Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // // DynamicArrays realsect; // for (i = 0; i < sectnum; i++) { // 0 int maxvalue = 0; // int insect_x = insect[i][0], insect_y = insect[i][1]; // for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // realsect.addElem(insect_x); realsect.addElem(insect_y); } // pcurve interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // pcurno = new DynamicArrays[count]; // makeNode(pcurve, count, pcurno); // int edgenum = count; // int vexnum = pcurno[count - 1][2] - edgenum + 1; // G = new Graph(vexnum, edgenum); // for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // DynamicArrays closecurnode[CURVE_VALUE]; // DynamicArrays intersectno; // getPointNo(pcurve, count, pcurno, realsect, intersectno); // 0 *openNum = 0; *closeNum = 0; // for (i = 0; i < intersectno.getSize(); i++) { // closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // closecur = new DynamicArrays[*closeNum]; // getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // int total = *openNum + *closeNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < total; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // else { // -1 cuerrcode = hipMemset(array1_dev, -1, arraysize * sizeof (int)); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // hipLaunchKernelGGL(( _traverseKerNew), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud1, array1_dev); if (hipGetLastError() != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // Host cuerrcode = hipMemcpy(array1, array1_dev, arraysize * sizeof (int), hipMemcpyDeviceToHost); if (cuerrcode != hipSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // DynamicArrays point; // -1 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { point.addElem(array1[i]); } } // Device errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // mark = new int[arraysize / 2]; // 0 memset(mark, 0, sizeof(int) * arraysize / 2); // count int count = 0; // getCurve int test = 0; // pcurve = new DynamicArrays [point.getSize() / 2]; // getCurve for(i = 0; i < point.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // 0 *openNum = 0; *closeNum = 0; *closeNum = count; // int total = count; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < total; i++) { // errcode = 
CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // curveLength = (size_t)(pcurve[i].getSize() / 2); // crvData = pcurve[i].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // FAIL_CURVETRACING_FREE; // return NO_ERROR; } // FAIL_CURVETRACING_FREE_CPU // #define FAIL_CURVETRACING_FREE_CPU do { \ if (outimg1 != NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host curveTracingCPU // __host__ int CurveTracing::curveTracingCPU(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // if (inimg == NULL || curveList == NULL) return NULL_POINTER; // int errcode; // 1 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // int *mark = NULL; // DynamicArrays *pcurve = NULL; // DynamicArrays *insect = NULL; // ; DynamicArrays *pcurno = NULL; // DynamicArrays *opencur = NULL; // DynamicArrays *closecur = NULL; // Graph *G = NULL; // 1 2 ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // int num1 = 0, num2 = 0; // DynamicArrays Vertex; DynamicArrays Intersect; // int i, j, k; // int dx, dy; // traverse(Vertex, Intersect, inimg, outimg1, tpl); // num1 = Vertex.getSize(); num2 = Intersect.getSize(); // if (num1 && num2) { // DynamicArrays Vertex1, Intersect1; // traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // int maxnum = inimg->width * inimg->height; // mark = new int[maxnum]; // 0 memset(mark, 0, sizeof(int) * maxnum); // count int count = 0; // getCurve int test = 0; // DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // getCurve for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // int x, y; // int sectnum = 0; // insect = new DynamicArrays [num2 / 2]; // while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // // DynamicArrays realsect; // for (i = 0; i < sectnum; i++) { // 0 int maxvalue = 0; // int insect_x = insect[i][0], insect_y = insect[i][1]; // for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // realsect.addElem(insect_x); 
realsect.addElem(insect_y); } // pcurve interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // pcurno = new DynamicArrays[count]; // makeNode(pcurve, count, pcurno); // int edgenum = count; // int vexnum = pcurno[count - 1][2] - edgenum + 1; // G = new Graph(vexnum, edgenum); // for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // DynamicArrays vertexno; DynamicArrays intersectno; // getPointNo(pcurve, count, pcurno, Vertex, vertexno); // getPointNo(pcurve, count, pcurno, realsect, intersectno); // 0 *openNum = 0; *closeNum = 0; // for (i = 0; i < vertexno.getSize(); i++) { // int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // openCurvePath(opencurnode, openNum, G, start, end); } } // for (i = 0; i < intersectno.getSize(); i++) { // closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // opencur = new DynamicArrays[*openNum]; // closecur = new DynamicArrays[*closeNum]; // getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // int total = *openNum + *closeNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < *openNum; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // curveLength = (size_t)(opencur[i].getSize() / 2); // crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } // for (; i < total; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // else if (num1 && !num2) { // int maxnum = inimg->width * inimg->height; // mark = new int[maxnum]; // 0 memset(mark, 0, sizeof(int) * maxnum); // count int count = 0; // getCurve int test = 0; // DynamicArrays *pcurve = new DynamicArrays [Vertex.getSize() / 2]; // getCurve for(i = 0; i < Vertex.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex[i], Vertex[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // pcurno = new DynamicArrays[count]; // makeNode(pcurve, count, pcurno); // int edgenum = count; // int vexnum = pcurno[count - 1][2] - edgenum + 1; // G = new Graph(vexnum, edgenum); // for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // DynamicArrays opencurnode[CURVE_VALUE]; // DynamicArrays vertexno; // getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 0 *openNum = 0; *closeNum = 0; // for (i = 0; i < vertexno.getSize(); i++) { // int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); 
j++) { end = vertexno[j]; // openCurvePath(opencurnode, openNum, G, start, end); } } // opencur = new DynamicArrays[*openNum]; // getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // int total = *openNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < *openNum; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // curveLength = (size_t)(opencur[i].getSize() / 2); // crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // else if (!num1 && num2) { // DynamicArrays Vertex1, Intersect1; // traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // int maxnum = inimg->width * inimg->height; // mark = new int[maxnum]; // 0 memset(mark, 0, sizeof(int) * maxnum); // count int count = 0; // getCurve int test = 0; // DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // getCurve for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // int x, y; // int sectnum = 0; // insect = new DynamicArrays [num2 / 2]; // while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // // DynamicArrays realsect; // for (i = 0; i < sectnum; i++) { // 0 int maxvalue = 0; // int insect_x = insect[i][0], insect_y = insect[i][1]; // for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // realsect.addElem(insect_x); realsect.addElem(insect_y); } // pcurve interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // pcurno = new DynamicArrays[count]; // makeNode(pcurve, count, pcurno); // int edgenum = count; // int vexnum = pcurno[count - 1][2] - edgenum + 1; // G = new Graph(vexnum, edgenum); // for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // DynamicArrays closecurnode[CURVE_VALUE]; // DynamicArrays intersectno; // getPointNo(pcurve, count, pcurno, realsect, intersectno); // 0 *openNum = 0; *closeNum = 0; // for (i = 0; i < intersectno.getSize(); i++) { // closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // closecur = new DynamicArrays[*closeNum]; // getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // int total = *openNum + *closeNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < total; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // CPU errcode 
= CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // else { // DynamicArrays point; // traverseNew(point, outimg1); // int maxnum = inimg->width * inimg->height; // mark = new int[maxnum]; // 0 memset(mark, 0, sizeof(int) * maxnum); // count int count = 0; // getCurve int test = 0; // pcurve = new DynamicArrays [point.getSize()]; // getCurve for(i = 0; i < point.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]); // test 0 count 1 1 if (test) { test = 0; continue; } count++; } // 0 *openNum = 0; *closeNum = 0; *closeNum = count; // int total = *openNum + *closeNum; // *curveList = new Curve *[total]; // size_t curveLength; // int *crvData; // for (i = 0; i < total; i++) { // errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // curveLength = (size_t)(pcurve[i].getSize() / 2); // crvData = pcurve[i].getCrvDatap(); if (crvData == NULL) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // CPU errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // FAIL_CURVETRACING_FREE_CPU; // return NO_ERROR; }
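// --- Illustrative sketch (not part of the CurveTracing file above; the function and enum names
// are hypothetical): the 8-neighbourhood rule that traverse()/_traverseKer apply to each
// foreground pixel. Counting the non-zero neighbours classifies the pixel: 0 -> isolated point,
// 1 -> curve endpoint, 2 -> interior curve point, >= 3 -> candidate intersection, which is
// cleared from the output image before the individual curve segments are traced.
#include <cstdio>

enum PixelKind { ISOLATED, ENDPOINT, INTERIOR, INTERSECTION };

static PixelKind Classify(const unsigned char* img, int width, int height, int x, int y) {
  // Same 8-neighbourhood offset template as the tpl[16] array used throughout CurveTracing.
  static const int tpl[16] = {-1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0};
  int nonzero = 0;
  for (int k = 0; k < 16; k += 2) {
    const int dx = x + tpl[k], dy = y + tpl[k + 1];
    if (dx >= 0 && dx < width && dy >= 0 && dy < height && img[dy * width + dx] != 0) ++nonzero;
  }
  if (nonzero == 0) return ISOLATED;
  if (nonzero == 1) return ENDPOINT;
  if (nonzero >= 3) return INTERSECTION;
  return INTERIOR;
}

int main() {
  // 3x3 image holding a horizontal 3-pixel segment on the middle row.
  const unsigned char img[9] = {0, 0, 0, 255, 255, 255, 0, 0, 0};
  std::printf("%d %d %d\n", Classify(img, 3, 3, 0, 1), Classify(img, 3, 3, 1, 1),
              Classify(img, 3, 3, 2, 1));  // prints: 1 2 1 (ENDPOINT INTERIOR ENDPOINT)
  return 0;
}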
2b876f02dd0fc9cd73cd097f9be0c92df933dbd3.cu
// CurveTracing // 实现的曲线跟踪 #include "CurveTracing.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "Template.h" #include "TemplateFactory.h" // 宏:CURVE_VALUE(曲线最大数目) // 设置图像能获得的曲线最大数目 #define CURVE_VALUE 1000 // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel 函数:_traverseKer(并行遍历图像得到端点数组和交点数组,并且得到去掉 // 交点后的输出图像) // 遍历图像,得到曲线的所有端点坐标和交点坐标,并且得到去掉交点后的输出图像, // 对每个像素点取其周围八领域像素点,如果八领域像素点的个数为 1,则这个为端点, // 若八领域像素点的个数为大于等于 3,则认为这个点作为伪交点,存储起来,这些伪 // 交点中有部分是真正的交点,后面计算需要从一堆伪交点中得到真正的交点。 static __global__ void // Kernel 函数无返回值 _traverseKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 去掉交点后的输出图像 int *array1_dev, // 存储端点的数组 int *array2_dev, // 存储交点的数组 Template boxtpl // 3 * 3 领域模板 ); // Kernel 函数:_traverseKerNew(遍历图像,得到图像上所有的像素点) // 遍历图像,保存图像上所有灰度值不为 0 的像素点,主要用于 CPU 串行代码中第二次 // 遍历的实现 static __global__ void // Kernel 函数无返回值 _traverseKerNew( ImageCuda inimg, // 输入图像 int *array1_dev // 存储端点的数组 ); // Host 函数:traverse(遍历图像,得到端点坐标、交点坐标及去掉交点后的图像) // 遍历图像,得到曲线的所有端点坐标和交点坐标,并且得到去掉交点后的输出图像, // 对每个像素点取其周围八领域像素点,如果八领域像素点的个数为 1,则这个为端点, // 若八领域像素点的个数为大于等于 3,则认为这个点作为伪交点,存储起来,这些伪 // 交点中有部分是真正的交点,后面计算需要从一堆伪交点中得到真正的交点。主要是 // 用于 CPU 串行代码的实现中处理 static __host__ void // 无返回值 traverse( DynamicArrays &Vertex, // 存储端点的动态数组 DynamicArrays &Intersect, // 存储伪交点的动态数组 Image *inimg, // 输入图像 Image *outimg, // 输出图像 int *tpl // 八领域模板 ); // Host 函数:traverseNew(遍历图像,得到图像上所有的像素点) // 遍历图像,保存图像上所有灰度值不为 0 的像素点,主要用于 CPU 串行代码中第二次 // 遍历的实现 static __host__ void // 无返回值 traverseNew( DynamicArrays &array, //存储点的坐标 Image *inimg // 输入图像 ); // Host 函数:getCurve(得到去掉交点后的所有曲线段) // 递归调用函数,得到去掉交点后的所有曲线段,并且这些曲线段都是非闭合曲线 static __host__ void // 无返回值 getCurve( DynamicArrays *pcurve, // 存储所有提取到的非闭合曲线 int &test, // 检测某端点开始的曲线是否已提取过 int count, // 曲线条数 Image *img, // 输入图像,待提取曲线的图像 int *mark, // 标志数组,大小为图像大小,表示像素点是否 // 访问,初始都为 0,如果访问则对应位置设为 1 int *tpl, // 八领域模板 int Vx, // 提取的曲线起点 x 坐标 int Vy // 提取的曲线起点 y 坐标 ); // Host 函数:insectClassify(从得到的一堆交点中,进行分类,确定交点个数) // 递归调用函数,实现原先得到的一堆交点进行分类,每一类是一部分点集,并且同一类 // 的点集是连通的,这些点集中可以找到一个合适的交点,同时根据分类的结果可以得到 // 交点的个数,有多少类就有多少交点 static __host__ void // 无返回值 insectClassify( int x, // 点的 x 坐标 int y, // 点的 y 坐标 DynamicArrays &Intersect, // 存储交点的动态数组 DynamicArrays *insect, // 存储分类的结果 int sectnum, // 交点个数,即分类的类数 int *tpl // 八领域模板 ); // Host 函数:makeCur(根据两点坐标得到一条曲线) // 根据两点坐标得到一条曲线,两个点的连线方式为从第一个点开始,先从对角线往 // 第二个点移动,如果第二个点的 x 或者 y 坐标的值与对角线 45° 移动的对应坐标值 // 一样,则沿着 x 或者 y 坐标移动直到重合,从而得到一条简短曲线 static __host__ void // 无返回值 makeCur( DynamicArrays &cur, // 存储得到的曲线 int dx1, // 曲线第一个点的 x 坐标 int dy1, // 曲线第一个点的 y 坐标 int dx2, // 曲线第一个点的 x 坐标 int dy2 // 曲线第一个点的 y 坐标 ); // Host 函数:interAssemble(交点曲线与原先得到的曲线进行重组,得到重组后曲线) // 根据得到的交点扩散出的曲线和原先得到的非闭合曲线进行重组,得到重组后曲线, // 以便之后的曲线还原 static __host__ void // 无返回值 interAssemble( DynamicArrays *pcurve, // 非闭合曲线集 int count, // 曲线条数 DynamicArrays *insect, // 交点分类的结果 int sectnum, // 交点曲线条数 DynamicArrays realsect, // 真正的交点数组 int *tpl // 八领域模板 ); // Host 函数:bpConnect(根据用户输入的半径得到近域点集) // 根据用户输入的半径得到近域点集,并且更新端点动态数组 static __host__ void bpConnect( DynamicArrays *pcurve, // 输入的曲线集 int count, // 输入的曲线集条数 int radius, // 半径大小参数 DynamicArrays *psect, // 得到新增加的近域点集 int *pcount, // 得到新增加的交点个数 DynamicArrays &Vertex // 存储端点的动态数组 ); // Host 函数:AidNorm(判断两个端点之间距离是否在领域大小内,若在则添加到点集中) // 判断两个端点之间距离是否在领域大小内,如果在领域半径大小内,则把找到的端点加 // 入到新增加的近域点集 static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false AidNorm( DynamicArrays *pcurve, // 输入的曲线集 int i, // 从编号为 i 的曲线往后搜索 int count, // 输入的曲线集条数 DynamicArrays *psect, // 新增加的近域点集 int pcount, // 新增加的交点个数 int 
radius, // 半径大小参数 DynamicArrays &Vertex, // 存储端点的动态数组 int x, int y // 曲线的端点坐标 ); // Host 函数:pcurveAcord(根据坐标得到曲线的编号) // 根据曲线的端点坐标查找曲线的编号,遍历所有曲线的端点,查找是否存在和给定的坐 // 标相等的点,则得到相应的返回结果。 static __host__ int // 返回值,如果找到的是曲线首部返回 0, // 如果找到的是曲线尾部则返回 1,否则返回 -1。 pcurveAcord( DynamicArrays *pcurve, // 输入的曲线集 int count, // 输入的曲线集条数 int &location, // 得到曲线的编号 int x, int y // 端点坐标 ); // Host 函数:verAssemble(断点重组,根据近域点集重组曲线集) // 根据近域点集重组曲线集,根据近域的每个集合里的那些点,进行计算,得到其中 // 最合适的点作为中心点,这个中心点也即是一个新产生的交点,然后发散出去多条曲线, // 把这些曲线更新到原来的曲线集中。更抽象成层的含义,断点的重组,根据用户输入的 // 半径进行曲线端点组合,如果两个端点离得太近就变成一段连续的曲线, // 达到的端点的连接性。 static __host__ void verAssemble( DynamicArrays *pcurve, // 曲线集 int count, // 曲线集的条数 DynamicArrays *psect, // 近域点集 int pcount, // 近域交点个数 DynamicArrays &realsect // 更新交点集合 ); // Host 函数:IsFindPoint(判断坐标是不是坐标集动态数组里的点) static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false IsFindPoint( DynamicArrays &array, // 判断该坐标是不是动态数组里的点集 int x, int y // 坐标 ); // Host 函数:makeNode(根据曲线的起点和终点,以及边的情况,得到曲线的编号) // 根据曲线的起点和终点,以及边的情况,从而得到曲线的编号,并且编号的起点和终点 // 是唯一的,也不会和边的编号重复,为之后构图提供方便 static __host__ void // 无返回值 makeNode( DynamicArrays *pcurve, // 输入的曲线集 int count, // 曲线的条数 DynamicArrays *pcurno // 存储曲线的编号 ); // Host 函数:openCurvePath(得到非闭合曲线编号序列) // 根据图的搜索得到非闭合曲线对应的编号序列,用于得到从 start 到 end 的所有路径 static __host__ void // 无返回值 openCurvePath( DynamicArrays *opencurnode, // 存储非闭合曲线编号集 int *openNum, // 得到非闭合曲线的条数 Graph *G, // 曲线构建的图 int start, // 搜索的起点 int end // 搜索的终点 ); // Host 函数:closeCurvePath(得到闭合曲线编号序列) // 根据图的搜索得到闭合曲线对应的编号序列,用于得到从 start 到 end 的所有路径 static __host__ void // 无返回值 closeCurvePath( DynamicArrays *closecurnode, // 存储闭合曲线编号集 int *closeNum, // 得到闭合曲线的条数 Graph *G, // 曲线构建的图 int insect // 搜索的起点,闭合曲线起点和终点一样 ); // Host 函数:IsArrayEqual(判断两个动态数组表示的曲线是否表示同一条曲线) // 判断两个动态数组表示的曲线是否表示同一条曲线,首先得到的是曲线编号,且不会 // 出现编号顺序一致的数组,可能会出现数量和编号一样但是顺序不一样的数组,排序后 // 比较结果,主要用于闭合曲线的提取,由于闭合曲线头尾编号一样,排序比较的时候 // 不算最后编号数 static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false IsArrayEqual( DynamicArrays object1, // 动态数组1 DynamicArrays object2 // 动态数组2 ); // Host 函数:getPointNo(根据坐标对得到数组内对应编号) // 通过得到的曲线序列,及首尾编号,得到点坐标对的数组对应编号 static __host__ void // 无返回值 getPointNo( DynamicArrays *pcurve, // 提取的曲线序列 int count, // 曲线数目 DynamicArrays *pcurno, // 与曲线序列相对应的首尾编号 DynamicArrays &array, // 点坐标对数组 DynamicArrays &arrayno // 存储得到的对应编号 ); // Host 函数:getCurveNonFormat(得到非格式化输出曲线数据有序序列) // 通过曲线编号集合和首尾编号集得到非格式化输出曲线数据有序序列 static __host__ void // 无返回值 getCurveNonFormat( DynamicArrays *curnode, // 曲线编号集 DynamicArrays *pcurve, // 提取的曲线序列 int count, // 提取的曲线序列的数量 DynamicArrays *pcurno, // 与曲线序列相对应的首尾编号 DynamicArrays *cur, // 最终得到的曲线非格式输出数据 int num, // 曲线的数量 bool close = false // 标志闭合还是非闭合曲线,默认为非闭合 ); // Host 函数:traverse(遍历图像,得到端点坐标、交点坐标及去掉交点后的图像) static __host__ void traverse(DynamicArrays &Vertex, DynamicArrays &Intersect, Image *inimg, Image *outimg,int *tpl) { // 定义临时变量,用于循环 int i, j, k; // 定义临时变量,存储八领域的值 int dx, dy; // 对每一个像素值不为 0 的像素点进行八领域处理 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 如果该像素点为 0 则扫描下一个像素点 if (inimg->imgData[i * inimg->width + j] == 0) { outimg->imgData[i * inimg->width + j] = 0; continue; } // 定义变量并且初始化为 0,用于取八领域下标 int m = 0; // 定义变量并且初始化为 0,用于得到八领域内有多少个像素值不为 0 的点 int flag = 0; for(k = 0; k < 8; k++) { dx = j + tpl[m++]; dy = i + tpl[m++]; // 符合要求的八领域内的点的像素值如果不为 0,就累加到 flag 中 if (dx >= 0 && dx < inimg->width && dy >= 0 && dy < inimg->height) { if (inimg->imgData[dy * inimg->width + dx] != 0) { flag++; } } } // 如果 flag 为 0,表示该像素八领域没有不为 0 的像素点,则该点是 // 孤立点,则给对应输出图像在该处赋值为 0 if (flag == 0) { outimg->imgData[i * 
inimg->width + j] = 0; // 如果 flag 为 1,表示该像素八领域有一个不为 0 的像素点,则该点是 // 曲线端点,并给对应输出图像在该处赋值原图像对应点像素值 } else if (flag == 1) { Vertex.addElem(j); Vertex.addElem(i); outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; // 如果 flag 大于等于 3,表示该像素点作为曲线交点,则给对应输出图像 // 在该处赋值为 0 } else if (flag >= 3) { Intersect.addElem(j); Intersect.addElem(i); outimg->imgData[i * inimg->width + j] = 0; // 否则flag则为 2,表示该像素点作为曲线上的点,并给对应输出图像在该处 // 赋值原图像对应点像素值 } else { outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; } } } } // Host 函数:traverseNew(遍历图像,得到图像上所有的像素点) static __host__ void traverseNew(DynamicArrays &array, Image *inimg) { // 定义临时变量,用于循环 int i, j; // 对每一个像素值不为 0 的像素点进行八领域处理 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 如果该像素点不为 0 则保存 if (inimg->imgData[i * inimg->width + j] != 0) { // 得到所有灰度值不为 0 的像素点 array.addElem(j); array.addElem(i); } } } } // Host 函数:getCurve(得到去掉交点后的所有曲线段) static __host__ void getCurve(DynamicArrays *pcurve, int &test, int count, Image *img, int *mark, int *tpl, int Vx, int Vy) { // 标志点是否已经访问过,如果访问过,test 加 1,并且退出,主要是判断该端点 // 是否和另一个端点是同一条曲线,如果是就不需要再重复提取 if (mark[Vy * img->width + Vx] == 1) { test++; return; } // 定义临时变量,存储八领域的值 int dx, dy; int j = 0; // 定义变量,用于循环 // 定义标志,表示八领域是否还有像素值不为 0 的点 int flag = 0; // 把该点的坐标值加入第 count 条曲线中,并且设置标志该点已经访问过 pcurve[count].addElem(Vx); pcurve[count].addElem(Vy); mark[Vy * img->width + Vx] = 1; // 按顺时针访问八领域的像素点 for(int i = 0; i < 8; i++) { dx = Vx + tpl[j++]; dy = Vy + tpl[j++]; // 得到第一个不为 0 并且没有访问过的像素点就退出循环,并且标志 flag 为 1 if (img->imgData[dy * img->width + dx] != 0 && mark[dy * img->width + dx] != 1) { flag = 1; break; } } // 如果 flag 为 1,说明找到了一个曲线上的点,以该点递归调用函数 if (flag == 1) { getCurve(pcurve, test, count, img, mark, tpl, dx, dy); } // 如果找不到了,说明已经全部搜索完,退出 return; } // Host 函数:insectClassify(从得到的一堆交点中,进行分类,确定交点个数) static __host__ void insectClassify(int x, int y, DynamicArrays &Intersect, DynamicArrays *insect, int sectnum, int *tpl) { // 把 x,y 坐标加入交点曲线中 insect[sectnum - 1].addElem(x); insect[sectnum - 1].addElem(y); // 加入完后就删除交点数组中的 x,y 坐标 Intersect.delElem(x, y); // if (Intersect.getSize() == 0) return; // 定义临时变量,存储八领域的坐标点 int dx, dy; for(int i = 0; i < 16; i += 2) { dx = x + tpl[i]; dy = y + tpl[i + 1]; // 寻找到交点中是否有和八领域一样的坐标点,若有,则递归调用函数 for(int j = 0; j < Intersect.getSize(); j += 2) { if (dx == Intersect[j] && dy == Intersect[j + 1]) { insectClassify(dx, dy, Intersect, insect, sectnum, tpl); } } } // 返回 return; } // Host 函数:makeCur(根据两点坐标得到一条曲线) static __host__ void makeCur(DynamicArrays &cur, int dx1, int dy1, int dx2, int dy2) { // 定义临时变量,存储坐标值 int x, y; // 首先把起始点加入临时曲线中 cur.addElem(dx1); cur.addElem(dy1); // 如果两坐标值一样,则返回,无须后续步骤 if (dx1 == dx2 && dy1 == dy2) return; // 分别计算两坐标值的差 int m = dx1 - dx2, n = dy1 - dy2; // 设置起始点 x = dx1; y = dy1; // 通过差值开始给交点曲线赋值,首先通过差值相对可以分成四个象限,第一、 // 第二、第三、第四象限,并且以第一个点为中心开始。 // 如果 m >= 0 并且 n >= 0,则表示第二个点相对第一个点在第一象限或者坐标轴 if (m >= 0 && n >= 0) { // 计算坐标差值的差 int d = m - n; // 根据差值的差给交点曲线赋值 if (d >= 0) { for (int c = 0; c < n; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // 如果 m >= 0 并且 n < 0,则表示第二个点相对第一个点在第四象限或者坐标轴 } else if (m >= 0 && n < 0) { n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); 
} } else { for (int c = 0; c < m; c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); cur.addElem(y); } } // 如果 m < 0 并且 n >= 0,则表示第二个点相对第一个点在第二象限或者坐标轴 } else if (m < 0 && n >= 0) { m = -m; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // 否则 m < 0 并且 n < 0,则表示第二个点相对第一个点在第三象限 } else { m = -m; n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); cur.addElem(y); } } } } // Host 函数:interAssemble(交点曲线与原先得到的曲线进行重组,得到重组后曲线) static __host__ void interAssemble(DynamicArrays *pcurve, int count, DynamicArrays *insect, int sectnum, DynamicArrays realsect, int *tpl) { // 如果没有交点则直接返回 if (realsect.getSize() == 0) return; // 定义临时变量 int i, j, k, x1, y1, x2, y2, dx1, dy1, dx2, dy2, num, num2; int mark1, mark2, flag1, flag2; // 对每一条得到的曲线,先得到其首尾端点,进行八领域寻找交点曲线的尾端点, // 如果找到就把交点曲线添加到原曲线中,实现交点曲线与原先得到的曲线重组 for(i = 0; i < count; i++) { // 初始化首尾都没有找到交点曲线的尾端点 flag1 = 0; flag2 = 0; // 初始化找到的交点曲线的曲线下标为 -1 mark1 = -1; mark2 = -1; // 得到原曲线动态数组的大小 num = pcurve[i].getSize(); // 得到原曲线的首尾端点坐标 x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; // 首先对原曲线的首端点开始进行查找 for (j = 0; j < 16; j += 2) { // 得到八领域的坐标 dx1 = x1 + tpl[j]; dy1 = y1 + tpl[j + 1]; // 进行查找,找到退出循环 for (k = 0; k < sectnum; k++) { // 得到交点曲线的动态数组的大小 num2 = insect[k].getSize(); // 找到就相应赋值,并且退出循环 for (int m = 0; m < num2; m += 2) { if (dx1 == insect[k][m] && dy1 == insect[k][m + 1]) { mark1 = k; flag1 += 1; break; } } // 找到退出循环 if (flag1) { break; } } // 找到退出循环 if (flag1) { break; } } // 对原曲线的尾端点开始进行查找 for (j = 0; j < 16; j += 2) { // 得到八领域的坐标 dx2 = x2 + tpl[j]; dy2 = y2 + tpl[j + 1]; // 进行查找,找到退出循环 for (k = 0; k < sectnum; k++) { // 得到交点曲线的动态数组的大小 num2 = insect[k].getSize(); // 找到就相应赋值,并且退出循环 for (int m = 0; m < num2; m += 2) { if (dx2 == insect[k][m] && dy2 == insect[k][m + 1]) { mark2 = k; flag2 += 1; break; } } // 找到退出循环 if (flag2) { break; } } // 找到退出循环 if (flag2) { break; } } // 如果没有找到可以组合的交点曲线,则进行下一个循环 if (mark1 < 0 && mark2 < 0) { continue; } // 如果首部找到了可以组合的交点曲线,尾部没有,则原曲线反转,然后把交点 // 曲线添加到反转后的曲线后边 if (mark1 >= 0 && mark2 < 0) { // 曲线反转 pcurve[i].reverse(); // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); // 如果尾部找到了可以组合的交点曲线,首部没有,直接把交点曲线添加到原来 // 曲线后边 } else if (mark1 < 0 && mark2 >= 0) { // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // 如果首部和尾部都找到了可以组合的交点曲线,先把尾部找到的交点曲线添加 // 到原来曲线后边,然后反转曲线,然后把首部找到的交点曲线添加到反转后的 // 曲线后边 } else { // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // 清空得到的曲线 temp.clear(); // 曲线反转 pcurve[i].reverse(); // 构造曲线加入到当前曲线中 makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); } } } // Host 函数:pcurveAcord(根据坐标得到曲线的编号) static __host__ int pcurveAcord(DynamicArrays *pcurve, int count, int &location, int x, int y) { // 定义临时变量 int i, dx1, 
dy1, dx2, dy2; // 根据输入坐标查找曲线集中对应的曲线编号 location for (i = 0; i < count; i++) { // 得到曲线的两个端点 dx1 = pcurve[i][0]; dy1 = pcurve[i][1]; dx2 = pcurve[i][pcurve[i].getSize() - 2]; dy2 = pcurve[i][pcurve[i].getSize() - 1]; // 根据端点查找对应的曲线,如果找到则返回首尾情况,表示端点是曲线的首部 // 还是尾部, 0 表示曲线首部,1 表示尾部 if ((dx1 == x) && (dy1 == y)) { location = i; return 0; } if ((dx2 == x) && (dy2 == y)) { location = i; return 1; } } // 如果没有找到则返回 -1 return -1; } // Host 函数:verAssemble(根据近域点集重组曲线集) static __host__ void verAssemble(DynamicArrays *pcurve, int count, DynamicArrays *psect, int pcount, DynamicArrays &realsect) { // 定义临时变量 int i, j, dx, dy, mark, location; int cen_x, cen_y; // 计算得到每个点集中的最中心点,加入到交点集合中 for (i = 0; i < pcount; i++) { cen_x = 0; cen_y = 0; for (j = 0; j < psect[i].getSize();) { cen_x += psect[i][j++]; cen_y += psect[i][j++]; } // 得到最中心点 cen_x = cen_x * 2 / j; cen_y = cen_y * 2 / j; realsect.addTail(cen_x, cen_y); // 组合曲线,更新曲线集合和交点动态数组 for (j = 0; j < psect[i].getSize();) { dx = psect[i][j++]; dy = psect[i][j++]; if ((mark = pcurveAcord(pcurve, count, location, dx, dy)) != -1) { if(!mark) { pcurve[location].reverse(); } DynamicArrays temp; makeCur(temp, dx, dy, cen_x, cen_y); temp.delElemXY(dx, dy); pcurve[location].addArray(temp); } } } } // Host 函数:IsFindPoint(判断坐标是不是坐标集动态数组里的点) static __host__ bool IsFindPoint(DynamicArrays &array, int x, int y) { // 遍历动态数组里的点 for (int i = 0; i < array.getSize(); i += 2) { // 找到就返回 true if (array[i] == x && array[i + 1] == y) return true; } // 没有找到则返回 false return false; } // Host 函数:AidNorm(判断两个端点之间距离是否在领域大小内,若在则添加到点集中) static __host__ bool AidNorm(DynamicArrays *pcurve, int i, int count, DynamicArrays *psect, int pcount, int radius, DynamicArrays &Vertex, int x, int y) { // 定义临时变量 int j, dx1, dy1, dx2, dy2; int dis1, dis2; bool mark1, mark2; bool find = false; // 查找编号 i 之后的曲线端点是否存在距离小于半径的端点 for (j = i + 1; j < count; j++) { // 得到曲线的两个端点坐标 dx1 = pcurve[j][0]; dy1 = pcurve[j][1]; dx2 = pcurve[j][pcurve[j].getSize() - 2]; dy2 = pcurve[j][pcurve[j].getSize() - 1]; mark1 = false; mark2 = false; // 查找第一个端点到曲线 i 端点的距离是否小于 radius if (IsFindPoint(Vertex, dx1, dy1)) { // 得到两点之间的距离并且向上取整 dis1 = (int)floor(sqrt((dx1 - x) * (dx1 - x) + (dy1 - y) * (dy1 - y))); if (dis1 <= radius) { mark1 = true; } } // 查找第二个端点到曲线 i 端点的距离是否小于 radius if(IsFindPoint(Vertex, dx2, dy2)) { // 得到两点之间的距离并且向上取整 dis2 = (int)floor(sqrt((dx2 - x) * (dx2 - x) + (dy2 - y) * (dy2 - y))); if (dis2 <= radius) { mark2 = true; } } // 找到两个端点中到到曲线 i 端点的距离最小的端点进行处理 if (mark1 && mark2) { if (dis1 <= dis2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); } else { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); } find = true; } else if (mark1 && !mark2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); find = true; } else if (!mark1 && mark2) { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); find = true; } } // 返回值 find return find; } // Host 函数:bpConnect(断点的重组,根据用户输入的半径进行曲线端点组合) static __host__ void bpConnect(DynamicArrays *pcurve, int count, int radius, DynamicArrays *psect, int *pcount, DynamicArrays &Vertex) { // 定义临时变量 int i, num; int x1, y1, x2, y2; bool find; // 初始化为新增加的交点数为 0 *pcount = 0; // 循环遍历每条曲线的两个端点 for (i = 0; i < count - 1; i++) { num = pcurve[i].getSize(); // 得到曲线的端点坐标 x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; find = false; // 判断原先是不是从端点点集得到的端点 if (IsFindPoint(Vertex, x1, y1)) { // 从编号往后的曲线中找到符合条件的端点 find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x1, y1); // 如果找到,从端点数组中删除这个端点,增加到编号为 *pcount 的 // 近域点集中 
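// Added note (not from the original author): at this point AidNorm has already
// moved every qualifying far endpoint of the later curves into psect[*pcount]
// and removed it from Vertex; the branch below only appends the current
// endpoint itself and advances *pcount when at least one neighbouring endpoint
// was found within the given radius.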
if (find) { Vertex.delElem(x1, y1); psect[*pcount].addTail(x1, y1); *pcount += 1; } } find = false; // 判断原先是不是从端点点集得到的端点 if (IsFindPoint(Vertex, x2, y2)) { // 从编号往后的曲线中找到符合条件的端点 find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x2, y2); // 如果找到,从端点数组中删除这个端点,增加到编号为 *pcount 的 // 近域点集中 if (find) { Vertex.delElem(x2, y2); psect[*pcount].addTail(x2, y2); *pcount += 1; } } } } // Host 函数:makeNode(根据曲线的起点和终点,以及边的情况,得到曲线的编号) static __host__ void makeNode(DynamicArrays *pcurve, int count, DynamicArrays *pcurno) { // 定义临时变量 int num1 = 0, num2 = 1, num = 0; int i, j, size1, size2; int x1, y1, x2, y2; // 定义 bool 型变量,表示查找是否之前相同的端点出现过 bool find1, find2; // 给第一条曲线,添加首尾端点编号为 0 1 pcurno[0].addTail(0, 1); // 接下来的端点编号从 2 开始 num = 2; // 循环给剩下的曲线端点编号,并且编号不能重复 for (i = 1; i < count; i++) { // 初始化没有找到 find1 = find2 = false; // 得到当前曲线的动态数组长度 size2 = pcurve[i].getSize(); // 查找之前的曲线端点 for (j = i - 1; j >= 0; j--) { // 得到当前曲线的动态数组长度 size1 = pcurve[j].getSize(); // 得到当前曲线的首尾端点坐标 x1 = pcurve[j][0]; y1 = pcurve[j][1]; x2 = pcurve[j][size1 - 2]; y2 = pcurve[j][size1 - 1]; // 如果找到了首端点编号,得到当前编号值 if (pcurve[i][0] == x1 && pcurve[i][1] == y1) { num1 = pcurno[j][0]; find1 = true; } else if (pcurve[i][0] == x2 && pcurve[i][1] == y2) { num1 = pcurno[j][1]; find1 = true; } // 如果找到了尾端点编号,得到当前编号值 if (pcurve[i][size2 - 2] == x1 && pcurve[i][size2 - 1] == y1) { num2 = pcurno[j][0]; find2 = true; } else if (pcurve[i][size2 - 2] == x2 && pcurve[i][size2 - 1] == y2) { num2 = pcurno[j][1]; find2 = true; } } // 如果首尾端点都找到了,则把之前得到的编号赋给当前曲线 if (find1 && find2) { pcurno[i].addTail(num1, num2); // 如果仅仅首端点找到了,则把之前得到的编号赋给当前曲线 } else if (find1 && !find2) { pcurno[i].addTail(num1, num); num++; // 如果仅仅尾端点找到了,则把之前得到的编号赋给当前曲线 } else if (!find1 && find2) { pcurno[i].addTail(num, num2); num++; // 如果首尾端点都没有找到,则重新累加赋值 } else { pcurno[i].addTail(num, num + 1); num += 2; } } // 曲线端点编号结束后,给曲线的边赋值,也不会重复 for (i = 0; i < count; i++) { pcurno[i].addElem(num++); } } // Host 函数:openCurvePath(得到非闭合曲线编号序列) static __host__ void openCurvePath(DynamicArrays *opencurnode, int *openNum, Graph *G, int start, int end) { // 定义动态数组变量,表示边栈和点栈 DynamicArrays edgestack, vexstack; // 定义点栈顶和边栈顶数,并且初始化 int vtop = -1, etop = -1; // 定义点栈和边栈的大小 int vstacksize, estacksize; // 定义临时变量 int curnode; // 定义临时边指针 Edge *cur; // 首端点入栈 vexstack.addElem(start); // 复位所有当前要访问的边 G->resetCurrent(); // 循环,用于得到从起点到终点的所有路径 while (vexstack.getSize() != 0) { // 得到当前栈的大小 vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // 如果栈顶的值为终点 if (vexstack[vstacksize - 1] == end) { // 得到一条从起点到终点的路径并且保存。即添加端点编号和边编号 for (int i = 0; i < estacksize; i++) { opencurnode[*openNum].addTail(vexstack[i], edgestack[i]); } // 添加终点编号 opencurnode[*openNum].addElem(end); // 曲线条数增加 1 *openNum += 1; // 删除点栈顶和边栈顶的端点,搜索下一条可能的路径 vexstack.delTail(vtop); edgestack.delTail(etop); // 如果栈顶的值不是终点,则继续搜索可能的路径 } else { // 得到当前栈顶的值 curnode = vexstack[vstacksize - 1]; // 得到图的当前点要访问的边 cur = G->vertexlist[curnode].current; // 如果当前要访问的边不为空 if (cur != NULL) { // 得到当前边的另一个顶点,如果该顶点不在点栈中,当前边也不在边 // 栈中,则把当前点和边分别入栈,把当前要访问的边指针指向下一条 // 边。判断是为了确保路径的点和边不能重复 if (!edgestack.findElem(cur->eno) && !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } G->vertexlist[curnode].current = cur->link; // 如果当前要访问的边为空,则当前点连接的边都访问过,删除点栈顶和 // 边栈顶的端点,重新设置当前栈顶端点的当前要访问的边 } else { vexstack.delTail(vtop); edgestack.delTail(etop); // 如果点栈顶的值等于起始点,则退出循环 if (vtop == start) break; // 设置当前栈顶端点的当前要访问的边为第一条边 G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } } // Host 函数:closeCurvePath(得到闭合曲线编号序列) 
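// Added note (illustrative, assumptions marked): the search below is an
// iterative depth-first enumeration driven by an explicit vertex stack and an
// explicit edge stack. Whenever the stack top returns to the start vertex
// `insect` with a non-empty edge stack, one candidate cycle has been found; it
// is stored only if IsArrayEqual reports that no previously stored cycle uses
// the same multiset of vertex/edge numbers, which filters out the same loop
// walked in the opposite direction.
// Hypothetical example of that filtering, using made-up numbering consistent
// with makeNode (vertices first, then edges): a closed loop cut at two
// intersections gives vertices 0 and 1 joined by two parallel edges 2 and 3.
// The enumeration first records the cycle 0-2-1-3-0; when it later reaches
// 0-3-1-2-0, sorting all entries except the last makes both sequences
// [0, 1, 2, 3], so the reversed duplicate is rejected.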
static __host__ void closeCurvePath(DynamicArrays *closecurnode, int *closeNum, Graph *G, int insect) { // 定义动态数组变量,表示边栈和点栈 DynamicArrays edgestack, vexstack; // 定义点栈顶和边栈顶数,并且初始化 int vtop = -1, etop = -1; // 定义点栈和边栈的大小 int vstacksize, estacksize; // 定义临时变量 int curnode; // 是否找到一样的路径,尽管顺序不一样 bool isFind; // 定义临时边指针 Edge *cur; // 路径起始端点入栈 vexstack.addElem(insect); // 闭合曲线数量 int num = *closeNum; // 复位所有当前要访问的边 G->resetCurrent(); while (vexstack.getSize() != 0) { // 得到当前栈的大小 vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // 初始化 isFind 为 false isFind = false; // 当边栈不为空,且点栈栈顶元素值为起点,则保存一条得到的闭合路径 if (estacksize != 0 && vexstack[vstacksize - 1] == insect) { for (int i = 0; i < estacksize; i++) { closecurnode[num].addTail(vexstack[i], edgestack[i]); } closecurnode[num].addElem(insect); // 查找是否和之前得到的路径表示是同一条路径 for (int j = 0; j < num; j++) { if (IsArrayEqual(closecurnode[j], closecurnode[num])) { isFind = true; break; } } // 如果找到了一样的路径,就清空当前得到的闭合路径 if (isFind) { closecurnode[num].clear(); // 如果没有找到,则保存当前得到的闭合路径,并且路径数量加 1 } else { num++; } // 删除点栈顶和边栈顶的端点,搜索下一条可能的路径 vexstack.delTail(vtop); edgestack.delTail(etop); // 栈顶不是起点,则继续搜索可能的路径 } else { // 得到当前栈顶的值 curnode = vexstack[vstacksize - 1]; // 得到图的当前点要访问的边 cur = G->vertexlist[curnode].current; // 如果当前要访问的边不为空 if (cur != NULL) { // 得到当前边的另一个顶点,如果当前边不在边栈中,则把当前点和边 // 分别入栈,把当前要访问的边指针指向下一条边。 if (!edgestack.findElem(cur->eno)) { if ((cur->jvex == insect)|| !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } } G->vertexlist[curnode].current = cur->link; // 如果当前要访问的边为空,则当前点连接的边都访问过,删除点栈顶和 // 边栈顶的端点,重新设置当前栈顶端点的当前要访问的边 } else { vexstack.delTail(vtop); edgestack.delTail(etop); // 如果点栈顶的值等于起始点,则退出循环 if (vtop == insect) break; // 设置设置当前栈顶端点的当前要访问的边为第一条边 G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } // 得到闭合曲线的数量 *closeNum = num; } // Host 函数:IsArrayEqual(判断两个动态数组表示的曲线是否表示同一条曲线) static __host__ bool IsArrayEqual(DynamicArrays object1, DynamicArrays object2) { // 两个动态数组的大小不一致,则直接返回 false if (object1.getSize() != object2.getSize()) { return false; // 否则看排序后结果是否一样,如果一样,则返回 true,否则返回 false // 由于处理的是闭合曲线编号,头尾是一致的,则排序比较不包括最后一个编号 } else { // 得到曲线编号动态数组大小 int size = object1.getSize(); // 定义临时指针变量,得到第一个动态数组的整型指针 int *p = object1.getCrvDatap(); // 定义临时变量,用于交换数据 int temp; // 临时变量 int min; // 排序第一个动态数组的数据 for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (p[j] < p[min]) { min = j; } } // 如果找到其他最小的则交换 if (min != i) { temp = p[i]; p[i] = p[min]; p[min] = temp; } } // 定义临时指针变量,得到第二个动态数组的整型指针 int *q = object2.getCrvDatap(); // 排序第二个动态数组的数据 for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (q[j] < q[min]) { min = j; } } // 如果找到其他最小的则交换 if (min != i) { temp = q[i]; q[i] = q[min]; q[min] = temp; } } // 排序结果如果不一样,则返回 false for (int i = 0; i < size - 1; i++) { if (p[i] != q[i]) { return false; } } // 表示同一条路径,返回 true return true; } } // Host 函数:getPointNo(根据坐标对得到数组内对应编号) static __host__ void getPointNo(DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays &array, DynamicArrays &arrayno) { // 临时变量,用于循环计数 int i, j; // 定义临时变量,存储坐标 int dx, dy; // 循环得到数组内坐标对编号 for (i = 0; i < array.getSize();) { // 得到数组的 x,y 坐标 dx = array[i++]; dy = array[i++]; // 根据得到的曲线头尾坐标得到相应编号 for (j = 0; j < count; j++) { // 如果为曲线首部 if (dx == pcurve[j][0] && dy == pcurve[j][1]) { arrayno.addElem(pcurno[j][0]); break; // 如果为曲线尾部 } else if (dx == pcurve[j][pcurve[j].getSize() - 2] && dy == pcurve[j][pcurve[j].getSize() - 1]) { 
arrayno.addElem(pcurno[j][1]); break; } } } } // Host 函数:getCurveNonFormat(得到非格式化输出曲线数据有序序列) static __host__ void getCurveNonFormat(DynamicArrays *curnode, DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays *cur, int num, bool close) { // 临时变量,存储曲线编号数组的大小 int nodesize; // 临时变量,存储得到的端点编号值 int inode; // 临时变量,得到点的数目 int vnum = pcurno[count - 1][2] - count + 1; // 临时变量,存储得到的曲线下标 int icur; // 定义循环计数变量 int i, j; // 临时变量,作为函数参数得到曲线的末尾坐标 int xtop, ytop; // 根据得到的曲线编号集获得对应曲线 for (i = 0; i < num; i++) { // 得到曲线编号数组的大小 nodesize = curnode[i].getSize(); // 循环得到曲线端点和边编号并且得到组合曲线 for (j = 0; j < nodesize;) { // 得到点编号 inode = curnode[i][j++]; // 如果超过大小,则推出循环 if (j >= nodesize) break; // 根据边编号得到曲线下标 icur = curnode[i][j++] - vnum; // 点编号和曲线下标,得到组合曲线 if (inode == pcurno[icur][0]) { cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } } else if (inode == pcurno[icur][1]) { pcurve[icur].reverse(); cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } pcurve[icur].reverse(); } } // 如果为闭合曲线就删除末尾坐标 if (close) { // 由于末尾坐标和起始一样,删除末尾坐标 cur[i].delTail(ytop); cur[i].delTail(xtop); } } } // Host 函数:freeCurve(释放曲线申请的空间) void freeCurve(Curve ***curveList, int count) { if (curveList == NULL) return; // 循环释放空间 for (int i = 0; i < count; i++) { CurveBasicOp::deleteCurve((*curveList)[i]); } delete [](*curveList); } // Kernel 函数:_traverseKer(并行遍历图像得到端点数组和交点数组,并且得到去掉 // 交点后的输出图像) static __global__ void _traverseKer(ImageCuda inimg, ImageCuda outimg, int *array1_dev, int *array2_dev, Template boxtpl) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算输出坐标点对应的图像数据数组下标。 int outidx = r * inimg.imgMeta.width + c; // 如果当前像素点为 0,则输出图像对应位置零,并且返回。 if (inimg.imgMeta.imgData[inidx] == 0) { outimg.imgMeta.imgData[inidx] = 0; return; } int tmpidx; // 临时变量,存储模板其他点的图像数据数组下标 int count = 0; // 临时变量,存储灰度不为 0 的个数 int dx, dy; // 临时变量,存储模板坐标 int *p = boxtpl.tplData; // 临时变量,得到模板指针 // 扫描该点模版范围内有多少个灰度值不为 0 的点 for (int i = 0; i < boxtpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = c + *(p++); dy = r + *(p++); // 如果是当前点则理下一个点 if (dx == c && dy == r) continue; // 计算坐标点对应的图像数据数组下标。 tmpidx = dy * inimg.pitchBytes + dx; // 得到当前点 8 领域内的非零像素点个数 if (inimg.imgMeta.imgData[tmpidx] != 0) { count++; } } // 如果 count 为 0,表示该像素八领域没有不为 0 的像素点,则该点是 // 孤立点,则给对应输出图像在该处赋值为 0 if (count == 0) { outimg.imgMeta.imgData[inidx] = 0; return; // 如果 flag 大于等于 3,表示该像素点作为曲线交点,则给对应输出图像 // 在该处赋值为 0 } else if (count >= 3) { array2_dev[2 * outidx] = c; array2_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = 0; // 如果 count 为 1,表示该像素八领域有一个不为 0 的像素点,则该点是 // 曲线端点,并给对应输出图像在该处赋值原图像对应点像素值 } else if (count == 1) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; // 否则flag则为 2,表示该像素点作为曲线上的点,并给对应输出图像在该处 // 赋值原图像对应点像素值 } else { outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; } } // Kernel 函数:_traverseKerNew(遍历图像,得到图像上所有的像素点) static __global__ void _traverseKerNew(ImageCuda inimg, int *array1_dev) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 
表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算输出坐标点对应的图像数据数组下标。 int outidx = r * inimg.imgMeta.width + c; // 如果当前像素点不为 0,则得到该坐标 if (inimg.imgMeta.imgData[inidx] != 0) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; } } // 宏:FAIL_CURVETRACING_FREE // 当下面函数运行出错时,使用该宏清除内存,防止内存泄漏。 #define FAIL_CURVETRACING_FREE do { \ if (outimg1 != NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (tmpdev != NULL) { \ cudaFree(tmpdev); \ tmpdev = NULL; \ } \ if (array1 != NULL) { \ delete []array1; \ array1 = NULL; \ } \ if (array2 != NULL) { \ delete []array2; \ array2 = NULL; \ } \ if (boxtpl != NULL) { \ TemplateFactory::putTemplate(boxtpl); \ boxtpl = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (psect != NULL) { \ delete []psect; \ psect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host 成员方法:curveTracing(曲线跟踪) // 对图像进行曲线跟踪,得到非闭合曲线和闭合曲线的有序序列 __host__ int CurveTracing::curveTracing(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // 如果输入图像指针为空或者输出的曲线集指针为空,错误返回 if (inimg == NULL || curveList == NULL) return NULL_POINTER; // 定义错误码变量 int errcode; cudaError_t cuerrcode; // 定义输出图像 1 和 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // 定义指针 tmpdev 给设备端端点数组和交点数组创建存储空间 int *tmpdev = NULL; // 定义 CPU 端端点数组和交点数组 int *array1 = NULL; int *array2 = NULL; // 定义模板 boxtpl 用于获取模板 Template *boxtpl = NULL; // 定义标志数组,标志图像上非零点的访问情况 int *mark = NULL; // 定义曲线数组,存储得到的曲线 DynamicArrays *pcurve = NULL; // 定义交点分类的动态数组,存储分类的结果 DynamicArrays *insect = NULL; // 定义近域点集动态数组,用于断点连续的处理 DynamicArrays *psect = NULL; // 定义变量,存储曲线的编号; DynamicArrays *pcurno = NULL; // 定义非闭合曲线 DynamicArrays *opencur = NULL; // 定义闭合曲线 DynamicArrays *closecur = NULL; // 定义图类的指针变量 Graph *G = NULL; // 给输出图像构建空间 ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输出图像 1 的 ROI 子图像。 ImageCuda outsubimgCud1; errcode = ImageBasicOp::roiSubImage(outimg1, &outsubimgCud1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输出图像 2的 ROI 子图像。 ImageCuda outsubimgCud2; errcode = ImageBasicOp::roiSubImage(outimg2, 
&outsubimgCud2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 定义八领域模板 int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // 定义变量,用于循环 int i, j, k; // 定义临时变量,得到第一次遍历得到的端点和交点动态数组大小 int num1 = 0, num2 = 0; // 定义临时变量存储坐标值 int dx, dy; // 计算数据尺寸。 int arraysize = inimg->width * inimg->height * 2; int datasize = arraysize * 2 * sizeof(int); // 在当前设备上申请坐标数据的空间。 cuerrcode = cudaMalloc((void **)(&tmpdev), datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 给该空间内容全部赋值为 -1 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义设备端端点数组和交点数组 int *array1_dev = tmpdev; int *array2_dev = tmpdev + arraysize; // 定义模板的尺寸 dim3 boxsize(3, 3, 1); // 通过模板工厂得到圆形领域模板 errcode = TemplateFactory::getTemplate(&boxtpl, TF_SHAPE_BOX, boxsize, NULL); // 检查模板是否为 NULL,如果为 NULL 直接报错返回。 if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 将模板拷贝到 Device 内存中 errcode = TemplateBasicOp::copyToCurrentDevice(boxtpl); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 计算调用第一个 Kernel 所需要的线程块尺寸。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用第一个 Kernel 生成图像标志位数组。 _traverseKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud1, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 给 CPU 端端点数组和交点数组申请空间 array1 = new int[arraysize]; array2 = new int[arraysize]; // 把两个数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } cuerrcode = cudaMemcpy(array2, array2_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义端点动态数组和交点动态数组 DynamicArrays Vertex, Intersect; // 把得到的端点和交点数组的非 -1 值赋值给端点动态数组和交点动态数组 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex.addElem(array1[i]); } } for (i = 0; i < arraysize; i++) { if (array2[i] != -1) { Intersect.addElem(array2[i]); } } // 得到第一次遍历得到的端点和交点动态数组大小 num1 = Vertex.getSize(); num2 = Intersect.getSize(); // 如果图像上曲线有端点和交点时,说明有曲线相交,可能有闭合和非闭合曲线, // 如果图像上曲线有端点没有交点时,但是经过断续连接有可能产生闭合和 // 非闭合曲线 if ((num1 && num2) || (num1 && !num2)) { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历 _traverseKer<<<gridsize, blocksize>>>(outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的端点动态数组 DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 
申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 申请交点分类的动态数组空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义近域点集的个数,得到新产生的交点个数 int pcount = 0; // 给近域点集申请最大空间 psect = new DynamicArrays[Vertex.getSize() / 2]; // 根据用户输入的半径得到近域点集,并且更新端点动态数组 bpConnect(pcurve, count, radius, psect, &pcount, Vertex); // 断点重组,根据用户输入的半径进行曲线断点组合,更新交点动态数组 verAssemble(pcurve, count, psect, pcount, realsect); // 存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 给图申请空间,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,分为非闭合曲线和闭合曲线 DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到端点和交点的坐标对应的编号数 DynamicArrays vertexno; DynamicArrays intersectno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 调用函数得到数组交点的编号 if (realsect.getSize() > 0) getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 申请非闭合曲线空间 opencur = new DynamicArrays[*openNum]; // 申请闭合曲线空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != 
NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } // 循环得到输出闭合曲线 for (; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (!num1 && num2) { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历 _traverseKer<<<gridsize, blocksize>>>(outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的端点动态数组 DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 申请交点分类的动态数组空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; 
insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 申请曲线编号大小,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有闭合曲线 DynamicArrays closecurnode[CURVE_VALUE]; // 定义交点编号数组,得到端点坐标对应的编号数 DynamicArrays intersectno; // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 申请闭合曲线空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 否则只有闭合曲线,且闭合曲线之间没有相交 else { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(array1_dev, -1, arraysize * sizeof (int)); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历,得到曲线上所有点集 _traverseKerNew<<<gridsize, blocksize>>>(outsubimgCud1, array1_dev); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的点集 DynamicArrays point; // 把得到的端点和交点数组的非 -1 值赋值给端点动态数组和交点动态数组 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { point.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [point.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < point.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; *closeNum = count; // 定义曲线总数 int total = count; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int 
*crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(pcurve[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = pcurve[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 释放动态申请的空间 FAIL_CURVETRACING_FREE; // 函数执行完毕,返回 return NO_ERROR; } // 宏:FAIL_CURVETRACING_FREE_CPU // 当下面函数运行出错时,使用该宏清除内存,防止内存泄漏。 #define FAIL_CURVETRACING_FREE_CPU do { \ if (outimg1 != NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host 成员方法:curveTracingCPU(曲线跟踪) // 对图像进行曲线跟踪,得到非闭合曲线和闭合曲线的有序序列 __host__ int CurveTracing::curveTracingCPU(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // 如果输入图像指针为空或者输出的曲线集指针为空,错误返回 if (inimg == NULL || curveList == NULL) return NULL_POINTER; // 定义错误码变量 int errcode; // 定义输出图像 1 和 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // 定义标志数组,标志图像上非零点的访问情况 int *mark = NULL; // 定义曲线数组,存储得到的曲线 DynamicArrays *pcurve = NULL; // 定义交点分类的动态数组,存储分类的结果 DynamicArrays *insect = NULL; // 定义变量,存储曲线的编号; DynamicArrays *pcurno = NULL; // 定义非闭合曲线 DynamicArrays *opencur = NULL; // 定义闭合曲线 DynamicArrays *closecur = NULL; // 定义图类的指针变量 Graph *G = NULL; // 构建输出图像 1 和 2 ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // 定义八领域模板 int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // 定义临时变量,得到第一次遍历得到的端点和交点动态数组大小 int num1 = 0, num2 = 0; // 定义第一次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex; DynamicArrays Intersect; // 定义变量,用于循环 int i, j, k; // 定义临时变量存储坐标值 int dx, dy; // 遍历图像,得到端点和交点的动态数组 traverse(Vertex, Intersect, inimg, outimg1, tpl); // 得到第一次遍历得到的端点和交点动态数组大小 num1 = Vertex.getSize(); num2 = Intersect.getSize(); // 如果图像上曲线有端点和交点时,说明有曲线相交,可能有闭合和非闭合曲线 if (num1 && num2) { // 定义第二次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex1, Intersect1; // 第二次遍历图像,得到端点和交点的动态数组 traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 申请标志数组的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 
定义变量,存储交点的个数 int sectnum = 0; // 定义交点分类的动态数组,存储分类的结果,并且申请空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,分为非闭合曲线和闭合曲线 DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到顶点和端点的坐标对应的编号数 DynamicArrays vertexno; DynamicArrays intersectno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 定义非闭合曲线,并且申请空间 opencur = new DynamicArrays[*openNum]; // 定义闭合曲线,并且申请大小空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } // 循环得到输出闭合曲线 for (; i < total; i++) { // 申请曲线空间 errcode = 
CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (num1 && !num2) { // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex[i], Vertex[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有非闭合曲线 DynamicArrays opencurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到顶点和端点的坐标对应的编号数 DynamicArrays vertexno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 定义非闭合曲线,并且申请空间 opencur = new DynamicArrays[*openNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 定义曲线总数 int total = *openNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (!num1 && num2) { // 定义第二次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex1, Intersect1; // 第二次遍历图像,得到端点和交点的动态数组 traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 
getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 定义交点分类的动态数组,存储分类的结果,并且申请空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有闭合曲线 DynamicArrays closecurnode[CURVE_VALUE]; // 定义交点编号数组,得到端点坐标对应的编号数 DynamicArrays intersectno; // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 定义闭合曲线,并且申请大小空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 否则只有闭合曲线,且闭合曲线之间没有相交 else { // 定义第二次遍历要得到的点集 DynamicArrays point; // 第二次遍历图像,得到端点和交点的动态数组 traverseNew(point, outimg1); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new 
int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [point.getSize()]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < point.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; *closeNum = count; // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(pcurve[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = pcurve[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 释放动态申请的空间 FAIL_CURVETRACING_FREE_CPU; // 函数执行完毕,返回 return NO_ERROR; }
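// ----------------------------------------------------------------------------
// Added usage sketch (illustrative only, not part of the original file). How
// the CurveTracing object is constructed and how its neighbourhood radius is
// configured are declared in "CurveTracing.h" and are assumed here; inimg is
// assumed to be a binarised, thinned curve image already resident on the host.
//
//     Curve **curves = NULL;
//     int openNum = 0, closeNum = 0;
//     CurveTracing tracer;                  // assumed default construction
//     int err = tracer.curveTracing(inimg, &curves, &openNum, &closeNum);
//     if (err == NO_ERROR) {
//         // curves[0 .. openNum-1] hold the open curves, followed by
//         // curves[openNum .. openNum+closeNum-1] holding the closed ones;
//         // each Curve was built from the ordered x,y point sequence above.
//         freeCurve(&curves, openNum + closeNum);
//     }
// ----------------------------------------------------------------------------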
ecd815a39f45a589e89d39fc5361f5e64d4004db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> // keeping THC headers for gpuAtomicAdd #include <THH/THHAtomics.cuh> #include <thrust/pair.h> namespace at { namespace native { namespace { using at::cuda::detail::canUse32BitIndexMath; __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping1d( int64_t input_w, int64_t output_w, int64_t output_x, int64_t pad_l) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_w; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_w; auto i_start_x = ::max(int64_t(0), -pad_l); auto o_start_x = ::max(int64_t(0), pad_l); int64_t input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_w + pad_l - 1)) - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x; return thrust::make_pair<int64_t, int64_t>( input_offset + input_x, output_offset + output_x); } __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping2d( int64_t input_dim_x, int64_t input_dim_y, int64_t output_dim_x, int64_t output_dim_y, int64_t pad_l, int64_t pad_t, int64_t output_xy) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_dim_x * input_dim_y; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_dim_x * output_dim_y; auto output_x = output_xy % output_dim_x; auto output_y = output_xy / output_dim_x; auto i_start_x = ::max(int64_t(0), -pad_l); auto i_start_y = ::max(int64_t(0), -pad_t); auto o_start_x = ::max(int64_t(0), pad_l); auto o_start_y = ::max(int64_t(0), pad_t); auto input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_dim_x + pad_l - 1)) - output_x + 2 * pad_l + input_dim_x - 1 - o_start_x + i_start_x; auto input_y = ::abs(output_y - pad_t) - ::abs(output_y - (input_dim_y + pad_t - 1)) - output_y + 2 * pad_t + input_dim_y - 1 - o_start_y + i_start_y; return thrust::make_pair<int64_t, int64_t>( input_offset + input_y * input_dim_x + input_x, output_offset + output_y * output_dim_x + output_x); } template<typename scalar_t> __global__ void reflection_pad1d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad1d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); gpuAtomicAdd( &grad_input[index_pair.first], grad_output[index_pair.second]); } } template<typename scalar_t> __global__ void reflection_pad2d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, 
output_dim_x, output_dim_y, pad_l, pad_t, output_xy); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad2d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); gpuAtomicAdd(&grad_input[index_pair.first], grad_output[index_pair.second]); } } void reflection_pad1d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; TORCH_CHECK( (input_.ndimension() == 2 && input_.size(1) != 0) || (input_.ndimension() == 3 && input_.size(1) != 0 && input_.size(2) != 0), "2D or 3D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 3) { nbatch = input_.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input_.size(dim_plane); int64_t input_w = input_.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less " "than the corresponding input dimension, but got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_); TORCH_CHECK(output_w >= 1, "input (W: ", input_w, ")is too small. Calculated output W: ", output_w); if (input_.ndimension() == 2) { output.resize_({nplane, output_w}); } else { output.resize_({nbatch, nplane, output_w}); } if (output.numel() == 0) { return; } dim3 block_size(output_w > 256 ? 256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); Tensor input = input_.contiguous(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad1d_out_template", [&] { hipLaunchKernelGGL(( reflection_pad1d_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad1d_backward_out_template( Tensor & grad_input, const Tensor & grad_output_, const Tensor & input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; if (input.ndimension() == 3) { nbatch = input.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input.size(dim_plane); int64_t input_w = input.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; Tensor grad_output = grad_output_.contiguous(); TORCH_CHECK(output_w == grad_output.size(dim_w), "gradOutput width unexpected. Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); dim3 block_size(output_w > 256 ? 
256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_input.scalar_type(), "reflection_pad1d_backward_out_template", [&] { hipLaunchKernelGGL(( reflection_pad1d_backward_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad2d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0; TORCH_CHECK( (input_.ndimension() == 3 && valid_dims) || (input_.ndimension() == 4 && valid_dims && input_.size(3) != 0), "3D or 4D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 4) { nbatch = input_.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input_.size(plane_dim); int input_h = input_.size(dim_h); int input_w = input_.size(dim_w); TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_.sizes()); TORCH_CHECK(pad_t < input_h && pad_b < input_h, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h, " of input ", input_.sizes()); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w >= 1 || output_h >= 1, "input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated " "output H: ", output_h, " W: ", output_w); if (input_.ndimension() == 3) { output.resize_({nplane, output_h, output_w}); } else { output.resize_({nbatch, nplane, output_h, output_w}); } if (output.numel() == 0) { return; } Tensor input = input_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 
256 : output_plane_size); dim3 grid_size( (int) ::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad2d_out_template", [&] { hipLaunchKernelGGL(( reflection_pad2d_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad2d_backward_out_template( Tensor &grad_input, const Tensor &grad_output_, const Tensor &input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "output gradient tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; if (input.ndimension() == 4) { nbatch = input.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input.size(plane_dim); int input_h = input.size(dim_h); int input_w = input.size(dim_w); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width " "unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w)); TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height " "unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h)); Tensor grad_output = grad_output_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size); dim3 grid_size( (int) ::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad2d_backward_out_template", [&] { hipLaunchKernelGGL(( reflection_pad2d_backward_out_kernel), dim3(grid_size), dim3(block_size), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } } // namespace Tensor& reflection_pad1d_out_cuda(const Tensor& input, IntArrayRef padding, Tensor& output) { reflection_pad1d_out_template(output, input, padding); return output; } Tensor reflection_pad1d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad1d_out_template(output, input, padding); return output; } Tensor& reflection_pad1d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad1d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor& 
reflection_pad2d_out_cuda(const Tensor& input, IntArrayRef padding, Tensor& output) { reflection_pad2d_out_template(output, input, padding); return output; } Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad2d_out_template(output, input, padding); return output; } Tensor& reflection_pad2d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad2d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } } // namespace native } // namespace at
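/*
 * A small host-side sketch (not part of the original sources) that evaluates
 * the same closed-form reflection mapping used by get_index_mapping1d above,
 * so the formula can be sanity-checked without launching a kernel. For
 * input_w = 5 and pad_l = pad_r = 2 it reproduces the expected
 * "reflect without repeating the border" pattern 2 1 0 1 2 3 4 3 2.
 * The function and variable names here are illustrative only.
 */
#include <algorithm>
#include <cstdio>
#include <cstdlib>

static long reflect1d_input_index(long output_x, long input_w, long pad_l)
{
    long i_start_x = std::max(0L, -pad_l);   // first input column that is copied
    long o_start_x = std::max(0L, pad_l);    // first output column that is a copy
    return std::labs(output_x - pad_l)
         - std::labs(output_x - (input_w + pad_l - 1))
         - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x;
}

int main()
{
    const long input_w = 5, pad_l = 2, pad_r = 2;
    for (long x = 0; x < input_w + pad_l + pad_r; ++x)
        std::printf("%ld ", reflect1d_input_index(x, input_w, pad_l));
    std::printf("\n");   // prints: 2 1 0 1 2 3 4 3 2
    return 0;
}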
ecd815a39f45a589e89d39fc5361f5e64d4004db.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> // keeping THC headers for gpuAtomicAdd #include <THC/THCAtomics.cuh> #include <thrust/pair.h> namespace at { namespace native { namespace { using at::cuda::detail::canUse32BitIndexMath; __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping1d( int64_t input_w, int64_t output_w, int64_t output_x, int64_t pad_l) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_w; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_w; auto i_start_x = ::max(int64_t(0), -pad_l); auto o_start_x = ::max(int64_t(0), pad_l); int64_t input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_w + pad_l - 1)) - output_x + 2 * pad_l + input_w - 1 - o_start_x + i_start_x; return thrust::make_pair<int64_t, int64_t>( input_offset + input_x, output_offset + output_x); } __device__ inline thrust::pair<int64_t, int64_t> get_index_mapping2d( int64_t input_dim_x, int64_t input_dim_y, int64_t output_dim_x, int64_t output_dim_y, int64_t pad_l, int64_t pad_t, int64_t output_xy) { // 3D grid of 1D blocks auto input_offset = (blockIdx.y + blockIdx.z * gridDim.y) * input_dim_x * input_dim_y; auto output_offset = (blockIdx.y + blockIdx.z * gridDim.y) * output_dim_x * output_dim_y; auto output_x = output_xy % output_dim_x; auto output_y = output_xy / output_dim_x; auto i_start_x = ::max(int64_t(0), -pad_l); auto i_start_y = ::max(int64_t(0), -pad_t); auto o_start_x = ::max(int64_t(0), pad_l); auto o_start_y = ::max(int64_t(0), pad_t); auto input_x = ::abs(output_x - pad_l) - ::abs(output_x - (input_dim_x + pad_l - 1)) - output_x + 2 * pad_l + input_dim_x - 1 - o_start_x + i_start_x; auto input_y = ::abs(output_y - pad_t) - ::abs(output_y - (input_dim_y + pad_t - 1)) - output_y + 2 * pad_t + input_dim_y - 1 - o_start_y + i_start_y; return thrust::make_pair<int64_t, int64_t>( input_offset + input_y * input_dim_x + input_x, output_offset + output_y * output_dim_x + output_x); } template<typename scalar_t> __global__ void reflection_pad1d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); output[index_pair.second] = input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad1d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_w, int64_t pad_l, int64_t pad_r) { auto output_x = threadIdx.x + blockIdx.x * blockDim.x; auto output_w = input_w + pad_l + pad_r; if (output_x < output_w) { auto index_pair = get_index_mapping1d(input_w, output_w, output_x, pad_l); gpuAtomicAdd( &grad_input[index_pair.first], grad_output[index_pair.second]); } } template<typename scalar_t> __global__ void reflection_pad2d_out_kernel( scalar_t * input, scalar_t * output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); output[index_pair.second] = 
input[index_pair.first]; } } template <typename scalar_t> __global__ void reflection_pad2d_backward_out_kernel( scalar_t * grad_input, scalar_t * grad_output, int64_t input_dim_x, int64_t input_dim_y, int pad_t, int pad_b, int pad_l, int pad_r) { auto output_xy = threadIdx.x + blockIdx.x * blockDim.x; auto output_dim_x = input_dim_x + pad_l + pad_r; auto output_dim_y = input_dim_y + pad_t + pad_b; if (output_xy < output_dim_x * output_dim_y) { auto index_pair = get_index_mapping2d( input_dim_x, input_dim_y, output_dim_x, output_dim_y, pad_l, pad_t, output_xy); gpuAtomicAdd(&grad_input[index_pair.first], grad_output[index_pair.second]); } } void reflection_pad1d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; TORCH_CHECK( (input_.ndimension() == 2 && input_.size(1) != 0) || (input_.ndimension() == 3 && input_.size(1) != 0 && input_.size(2) != 0), "2D or 3D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 3) { nbatch = input_.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input_.size(dim_plane); int64_t input_w = input_.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less " "than the corresponding input dimension, but got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_); TORCH_CHECK(output_w >= 1, "input (W: ", input_w, ")is too small. Calculated output W: ", output_w); if (input_.ndimension() == 2) { output.resize_({nplane, output_w}); } else { output.resize_({nbatch, nplane, output_w}); } if (output.numel() == 0) { return; } dim3 block_size(output_w > 256 ? 256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); Tensor input = input_.contiguous(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad1d_out_template", [&] { reflection_pad1d_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad1d_backward_out_template( Tensor & grad_input, const Tensor & grad_output_, const Tensor & input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "input tensor must fit into 32-bit index math"); int64_t dim_plane = 0; int64_t dim_w = 1; int64_t nbatch = 1; if (input.ndimension() == 3) { nbatch = input.size(0); dim_plane++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t nplane = input.size(dim_plane); int64_t input_w = input.size(dim_w); int64_t output_w = input_w + pad_l + pad_r; Tensor grad_output = grad_output_.contiguous(); TORCH_CHECK(output_w == grad_output.size(dim_w), "gradOutput width unexpected. Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); dim3 block_size(output_w > 256 ? 
256 : output_w); dim3 grid_size((int) ::ceil(output_w / 256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_input.scalar_type(), "reflection_pad1d_backward_out_template", [&] { reflection_pad1d_backward_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, pad_l, pad_r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad2d_out_template( Tensor &output, const Tensor &input_, IntArrayRef padding) { TORCH_CHECK(canUse32BitIndexMath(input_), "input tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; bool valid_dims = input_.size(1) != 0 && input_.size(2) != 0; TORCH_CHECK( (input_.ndimension() == 3 && valid_dims) || (input_.ndimension() == 4 && valid_dims && input_.size(3) != 0), "3D or 4D (batch mode) tensor expected for input, but got: ", input_); if (input_.ndimension() == 4) { nbatch = input_.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input_.size(plane_dim); int input_h = input_.size(dim_h); int input_w = input_.size(dim_w); TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w, " of input ", input_.sizes()); TORCH_CHECK(pad_t < input_h && pad_b < input_h, "Padding size should be less than the corresponding input dimension, but " "got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h, " of input ", input_.sizes()); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w >= 1 || output_h >= 1, "input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated " "output H: ", output_h, " W: ", output_w); if (input_.ndimension() == 3) { output.resize_({nplane, output_h, output_w}); } else { output.resize_({nbatch, nplane, output_h, output_w}); } if (output.numel() == 0) { return; } Tensor input = input_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 
256 : output_plane_size); dim3 grid_size( (int) std::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad2d_out_template", [&] { reflection_pad2d_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void reflection_pad2d_backward_out_template( Tensor &grad_input, const Tensor &grad_output_, const Tensor &input, IntArrayRef padding) { if (grad_input.numel() == 0) { return; } TORCH_CHECK(canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(canUse32BitIndexMath(grad_output_), "output gradient tensor must fit into 32-bit index math"); int plane_dim = 0; int dim_h = 1; int dim_w = 2; int nbatch = 1; if (input.ndimension() == 4) { nbatch = input.size(0); plane_dim++; dim_h++; dim_w++; } int64_t pad_l = padding[0]; int64_t pad_r = padding[1]; int64_t pad_t = padding[2]; int64_t pad_b = padding[3]; int nplane = input.size(plane_dim); int input_h = input.size(dim_h); int input_w = input.size(dim_w); int output_h = input_h + pad_t + pad_b; int output_w = input_w + pad_l + pad_r; TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width " "unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w)); TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height " "unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h)); Tensor grad_output = grad_output_.contiguous(); int output_plane_size = output_h * output_w; dim3 block_size(output_plane_size > 256 ? 256 : output_plane_size); dim3 grid_size( (int) std::ceil(output_plane_size/256.0), nplane, nbatch); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "reflection_pad2d_backward_out_template", [&] { reflection_pad2d_backward_out_kernel<<< grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), input_w, input_h, pad_t, pad_b, pad_l, pad_r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } } // namespace Tensor& reflection_pad1d_out_cuda(const Tensor& input, IntArrayRef padding, Tensor& output) { reflection_pad1d_out_template(output, input, padding); return output; } Tensor reflection_pad1d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad1d_out_template(output, input, padding); return output; } Tensor& reflection_pad1d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad1d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad1d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad1d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor& reflection_pad2d_out_cuda(const Tensor& input, IntArrayRef padding, Tensor& 
output) { reflection_pad2d_out_template(output, input, padding); return output; } Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) { auto output = at::empty({0}, input.options()); reflection_pad2d_out_template(output, input, padding); return output; } Tensor& reflection_pad2d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, IntArrayRef padding, Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_out_cuda"); grad_input.resize_as_(input); grad_input.zero_(); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } Tensor reflection_pad2d_backward_cuda( const Tensor& grad_output, const Tensor& input, IntArrayRef padding) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("reflection_pad2d_backward_cuda"); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); reflection_pad2d_backward_out_template( grad_input, grad_output, input, padding); return grad_input; } } // namespace native } // namespace at
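/*
 * A stand-alone CUDA sketch (illustrative only, independent of ATen) of the
 * launch geometry used by the templates above: one thread per output column
 * along x, one grid slot per plane in y and per batch item in z, with
 * blockIdx.y/z folded into the buffer offset the same way get_index_mapping1d
 * does. The simplified pad_reflect1d kernel below stands in for
 * reflection_pad1d_out_kernel on raw float buffers and assumes, as the
 * TORCH_CHECKs above enforce, that each pad is smaller than the input width.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void pad_reflect1d(const float *in, float *out,
                              int input_w, int pad_l, int pad_r)
{
    int output_w = input_w + pad_l + pad_r;
    int x = blockIdx.x * blockDim.x + threadIdx.x;    // output column
    int plane = blockIdx.y + blockIdx.z * gridDim.y;  // (plane, batch) slot
    if (x >= output_w)
        return;
    // Reflect x into [0, input_w) without repeating the border element.
    int ix = x - pad_l;
    if (ix < 0) ix = -ix;
    if (ix >= input_w) ix = 2 * (input_w - 1) - ix;
    out[plane * output_w + x] = in[plane * input_w + ix];
}

int main()
{
    const int nbatch = 2, nplane = 3, input_w = 5, pad_l = 2, pad_r = 2;
    const int output_w = input_w + pad_l + pad_r;
    const int in_elems = nbatch * nplane * input_w;
    const int out_elems = nbatch * nplane * output_w;
    float h_in[in_elems], h_out[out_elems];
    for (int i = 0; i < in_elems; ++i)
        h_in[i] = (float)(i % input_w);

    float *d_in, *d_out;
    cudaMalloc(&d_in, in_elems * sizeof(float));
    cudaMalloc(&d_out, out_elems * sizeof(float));
    cudaMemcpy(d_in, h_in, in_elems * sizeof(float), cudaMemcpyHostToDevice);

    // Same sizing rule as the templates above: at most 256 threads per block.
    dim3 block(output_w > 256 ? 256 : output_w);
    dim3 grid((output_w + 255) / 256, nplane, nbatch);
    pad_reflect1d<<<grid, block>>>(d_in, d_out, input_w, pad_l, pad_r);
    cudaMemcpy(h_out, d_out, out_elems * sizeof(float), cudaMemcpyDeviceToHost);

    for (int x = 0; x < output_w; ++x)
        std::printf("%.0f ", h_out[x]);    // first plane: 2 1 0 1 2 3 4 3 2
    std::printf("\n");
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}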
814713048e48789c790fe439e7aaec5731013fab.hip
// !!! This is a file automatically generated by hipify!!! #define CUDPP_STATIC_LIB // #include "../nvidia_sdk/C/common/inc/cutil_inline.h" #include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h" // lib above replaced w/this one at CUDA 5.0 #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdio.h> /* defines printf for tests */ #include <time.h> /* defines time_t for timings in the test */ #include <math.h> #ifdef linux # include <endian.h> /* attempt to define endianness */ #endif //#include "debruijn.h" #include "kmer.h" #include "graph.h" #include "cudpp.h" #include "utils.h" #include "common.h" #include "gpuhash.h" #include "gpuhash_device.h" //#include "utils.cpp" #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ __BYTE_ORDER == __LITTLE_ENDIAN) || \ (defined(i386) || defined(__i386__) || defined(__i486__) || \ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ __BYTE_ORDER == __BIG_ENDIAN) || \ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #else # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 0 #endif /* unsigned int _host_hash_h(KEY_T key, unsigned int bucketCount){ return ((C0+C1*key)% LARGE_PRIME )% bucketCount; } VALUE_T getHashValue2Host(KEY_T key, TABLE_PTR T,unsigned int *bucketSize, unsigned int bucketCount){ unsigned int bucket=_host_hash_h(key,bucketCount); unsigned int l=0; unsigned int r=bucketSize[bucket]; unsigned int offset=bucket * BUCKET_SIZE; unsigned int mid=(l+r)>>1; while(l<r){ mid =l+((r-l)/2); if( T[offset+(mid<<1)] <key) { l=mid+1; }else { r=mid; } } if(l < bucketSize[bucket] && T[offset+(l<<1)]==key){ return T[offset+(l<<1)+1]; }else { return MAX_INT; } } */ /*** * Inline Printing Routine for l and e structures */ inline void printData(unsigned int * d_lstart, unsigned int * d_lcount, unsigned int * d_estart, unsigned int * d_ecount, unsigned int length) { unsigned int * h_lstart; unsigned int * h_lcount; unsigned int * h_estart; unsigned int * h_ecount; h_lstart = (unsigned int *) malloc(sizeof(unsigned int) * length); h_lcount = (unsigned int *) malloc(sizeof(unsigned int) * length); h_estart = (unsigned int *) malloc(sizeof(unsigned int) * length); h_ecount = (unsigned int *) malloc(sizeof(unsigned int) * length); checkCudaErrors( hipMemcpy(h_lstart, d_lstart, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lcount, d_lcount, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_estart, d_estart, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_ecount, d_ecount, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); for (unsigned int i = 0; i < length; i++) { printf("[%4u]\t es:%4u\t ec:%4u\t ls:%4u\t lc:%4u\n", i, h_estart[i], h_ecount[i], h_lstart[i], h_lcount[i]); } free(h_lstart); free(h_lcount); free(h_estart); free(h_ecount); } //__global__ void debruijnCount(unsigned int k,unsigned long kmerCount, unsigned char * idata,unsigned int * icount,unsigned int * vcount, unsigned int * lcount,unsigned int * ecount,unsigned int validBitMask){ /* * This kernel works on each l-mer ,counting edges of the graph. 
*/ __global__ void debruijnCount(KEY_PTR lmerKeys, /* lmer keys */ VALUE_PTR lmerValues, /* lmer frequency */ unsigned int lmerCount, /* total lmers */ KEY_PTR TK, /* Keys' pointer for Hash table*/ VALUE_PTR TV, /* Value pointer for Hash table*/ unsigned int * bucketSeed, /* bucketSize: size of each bucket (it should be renamed to bucketSize)*/ unsigned int bucketCount, /* total buckets */ unsigned int * lcount, /* leaving edge count array : OUT */ unsigned int * ecount, /* entering edge count array: OUT */ KEY_T validBitMask /* bit mask for K length encoded bits*/ ) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; VALUE_T prefixIndex = getHashValue(prefix, TK, TV, bucketSeed, bucketCount); VALUE_T suffixIndex = getHashValue(suffix, TK, TV, bucketSeed, bucketCount); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> __popcll(validBitMask)) & lomask); //atomicAdd(lcount+(prefixIndex<<2 )+transition,lmerValue); //atomicAdd(ecount+(suffixIndex<<2)+transition,lmerValue); lcount[(prefixIndex << 2) + transitionTo] = lmerValue; ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } } /** * This is cpu version for same kernel. for Debugging purpose only */ void debruijnCountHost(KEY_PTR lmerKeys, VALUE_PTR lmerValues, unsigned int lmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, unsigned int * lcount, unsigned int * ecount, KEY_T validBitMask, unsigned int bitCount, unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; unsigned int b; VALUE_T prefixIndex = host_getHashValue(prefix, TK, TV, bucketSeed, bucketCount, &b); VALUE_T suffixIndex = host_getHashValue(suffix, TK, TV, bucketSeed, bucketCount, &b); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> bitCount) & lomask); //atomicAdd(lcount+(prefixIndex<<2 )+transition,lmerValue); //atomicAdd(ecount+(suffixIndex<<2)+transition,lmerValue); if (lcount[(prefixIndex << 2) + transitionTo] > 0) { lcount[(prefixIndex << 2) + transitionTo] = lmerValue; } else { lcount[(prefixIndex << 2) + transitionTo] = lmerValue; } if (ecount[(suffixIndex << 2) + transitionFrom] > 0) { ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } else { ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } } } /* * stub for debruijnCountHost for debugging purpose */ void verifyDebruijnCountHost(KEY_PTR d_lmerKeys, VALUE_PTR d_lmerValues, unsigned int lmerCount, KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, unsigned int bucketCount, unsigned int * d_lcount, unsigned int * d_ecount, KEY_T validBitMask, unsigned int kmerCount) { KEY_PTR h_lmerKeys; VALUE_PTR h_lmerValues; KEY_PTR h_TK; VALUE_PTR h_TV; unsigned int * h_bucketSeed; unsigned int * h_lcount; unsigned int * h_ecount; unsigned int * hq_lcount; unsigned int * hq_ecount; h_lmerKeys = (KEY_PTR) malloc(lmerCount * sizeof(KEY_T)); h_lmerValues = (VALUE_PTR) malloc(lmerCount * sizeof(VALUE_T)); h_TK = (KEY_PTR) 
malloc(bucketCount * BUCKET_KEY_SIZE); h_TV = (VALUE_PTR) malloc(bucketCount * BUCKET_VALUE_SIZE); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_lcount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); hq_lcount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); hq_ecount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); checkCudaErrors( hipMemcpy(h_lmerKeys, d_lmerKeys, lmerCount * KEY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lmerValues, d_lmerValues, lmerCount * VALUE_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_TK, d_TK, bucketCount * BUCKET_KEY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_TV, d_TV, bucketCount * BUCKET_VALUE_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_bucketSeed, d_bucketSeed, bucketCount * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(hq_lcount, d_lcount, 4 * kmerCount * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(hq_ecount, d_ecount, 4 * kmerCount * sizeof(unsigned int), hipMemcpyDeviceToHost)); memset(h_lcount, 0, 4 * kmerCount * sizeof(unsigned int)); memset(h_ecount, 0, 4 * kmerCount * sizeof(unsigned int)); unsigned int bitCount = 0; KEY_T bit = 1; while (bit != 0) { if (bit & validBitMask) bitCount++; bit = bit << 1; } unsigned int edgesCount = 0; for (unsigned int i = 0; i < lmerCount; i++) { debruijnCountHost(h_lmerKeys, h_lmerValues, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_lcount, h_ecount, validBitMask, bitCount, i); edgesCount += h_lmerValues[i]; } unsigned int esum = 0; unsigned int qesum = 0; unsigned int lsum = 0; unsigned int qlsum = 0; unsigned int ei = 0; unsigned int li = 0; unsigned int qei = 0; unsigned int qli = 0; for (int j = 0; j < 4 * kmerCount; j++) { esum += h_ecount[j]; lsum += h_lcount[j]; qesum += hq_ecount[j]; qlsum += hq_lcount[j]; if (esum > edgesCount && ei < 1) ei = j; if (lsum > edgesCount && li < 1) li = j; if (qesum > edgesCount && qei < 1) qei = j; if (qlsum > edgesCount && qli < 1) qli = j; } printf( "lmerCount: %u, esum: %u, lsum: %u, ei: %u li:%u \n qesum:%u , qlsum:%u \n", edgesCount, esum, lsum, ei, li, qesum, qlsum); unsigned int enc = 0; unsigned int lnc = 0; for (unsigned int k = 0; k < 4 * kmerCount; k++) { if (h_lcount[k] != hq_lcount[k]) lnc++; if (h_ecount[k] != hq_ecount[k]) enc++; } printf("enc: %u, lnc:%u \n", enc, lnc); free(h_lmerValues); free(h_TK); free(h_TV); free(h_bucketSeed); free(h_lcount); free(h_ecount); free(hq_lcount); free(hq_ecount); } /** * CPU prefix scan */ void prefixScan(unsigned int * h_out, unsigned int * h_in, unsigned int length, bool inclusive) { memset(h_out, 0, length * sizeof(unsigned int)); /*calculate gold*/ if (inclusive) { h_out[0] = h_in[0]; } else { h_out[0] = 0; } for (unsigned int i = 1; i < length; i++) { h_out[i] = h_out[i - 1] + h_in[i - (inclusive ? 
0 : 1)]; } } /* * prefix sum validator **/ void validatePrefixScan(unsigned int * d_output, unsigned int * d_input, unsigned int length, bool inclusive) { unsigned int * h_input; unsigned int * h_output; unsigned int * hq_output; h_input = (unsigned int *) malloc(length * sizeof(unsigned int)); h_output = (unsigned int *) malloc(length * sizeof(unsigned int)); hq_output = (unsigned int *) malloc(length * sizeof(unsigned int)); checkCudaErrors( hipMemcpy(h_input, d_input, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(hq_output, d_output, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); /* memset(h_output,0,length*sizeof(unsigned int)); if(inclusive) { h_output[0]=h_input[0];} else { h_output[0]=0; } for(unsigned int i=1;i<length;i++){ h_output[i]=h_output[i-1]+h_input[i-(inclusive?0:1)]; } */ prefixScan(h_output, h_input, length, inclusive); /*compare*/ for (unsigned int j = 0; j < length; j++) { if (h_output[j] != hq_output[j]) { printf("differnce at index:%u is gold:%u, cudpp\n", j, h_output[j], hq_output[j]); } } free(h_input); free(h_output); free(hq_output); } /* * This kernel works on a k-mer (l-1mer) which are vertices of the graph. */ __global__ void setupVertices(KEY_PTR kmerKeys, unsigned int kmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, EulerVertex * ev, unsigned int * lcount, unsigned int * loffset, unsigned int * ecount, unsigned int * eoffset) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < kmerCount) { KEY_T key = kmerKeys[tid]; VALUE_T index = getHashValue(key, TK, TV, bucketSeed, bucketCount); ; ev[index].vid = key; ev[index].lp = loffset[(index << 2)]; ev[index].lcount = lcount[(index << 2)] + lcount[(index << 2) + 1] + lcount[(index << 2) + 2] + lcount[(index << 2) + 3]; ev[index].ep = eoffset[(index << 2)]; ev[index].ecount = ecount[(index << 2)] + ecount[(index << 2) + 1] + ecount[(index << 2) + 2] + ecount[(index << 2) + 3]; } } void setupVerticesHost(KEY_PTR kmerKeys, unsigned int kmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, EulerVertex * ev, unsigned int * lcount, unsigned int * loffset, unsigned int * ecount, unsigned int * eoffset, unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < kmerCount) { KEY_T key = kmerKeys[tid]; unsigned int bucket; VALUE_T index = host_getHashValue(key, TK, TV, bucketSeed, bucketCount, &bucket); ; ev[index].vid = key; ev[index].lp = loffset[(index << 2)]; ev[index].lcount = lcount[(index << 2)] + lcount[(index << 2) + 1] + lcount[(index << 2) + 2] + lcount[(index << 2) + 3]; ev[index].ep = eoffset[(index << 2)]; ev[index].ecount = ecount[(index << 2)] + ecount[(index << 2) + 1] + ecount[(index << 2) + 2] + ecount[(index << 2) + 3]; } } /* * This kernel works on an l-mer, which represents an edge * in the debruijn Graph. 
*/ __global__ void setupEdges( KEY_PTR lmerKeys, VALUE_PTR lmerValues, unsigned int * lmerOffsets, const unsigned int lmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, const unsigned int bucketCount, unsigned int * l, unsigned int * e, EulerEdge * ee, unsigned int * loffsets, unsigned int * eoffsets, const KEY_T validBitMask) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; //prefix and suffix index must be less than kmer count VALUE_T prefixIndex = getHashValue(prefix, TK, TV, bucketSeed, bucketCount); VALUE_T suffixIndex = getHashValue(suffix, TK, TV, bucketSeed, bucketCount); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> __popcll(validBitMask)) & lomask); unsigned int loffset = loffsets[(prefixIndex << 2) + transitionTo]; unsigned int eoffset = eoffsets[(suffixIndex << 2) + transitionFrom]; unsigned int lmerOffset = lmerOffsets[tid]; for (unsigned int i = 0; i < lmerValue; i++) { ee[lmerOffset].eid =lmerOffset; ee[lmerOffset].v1 = prefixIndex; ee[lmerOffset].v2 = suffixIndex; // lmerOffset; ee[lmerOffset].s = lmerValues[lmerCount - 1] + lmerOffsets[lmerCount - 1]; l[loffset] = lmerOffset; e[eoffset] = lmerOffset; loffset++; eoffset++; lmerOffset++; } } } void setupEdgesHost(KEY_PTR const lmerKeys, VALUE_PTR const lmerValues, unsigned int * const lmerOffsets, const unsigned int lmerCount, KEY_PTR const TK, VALUE_PTR const TV, unsigned int * const bucketSeed, const unsigned int bucketCount, unsigned int * const l, unsigned int * const e, EulerEdge * const ee, unsigned int * const loffsets, unsigned int * const eoffsets, const KEY_T validBitMask, const unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; unsigned int bucket; //prefix and suffix index must be less than kmer count VALUE_T prefixIndex = host_getHashValue(prefix, TK, TV, bucketSeed, bucketCount, &bucket); VALUE_T suffixIndex = host_getHashValue(suffix, TK, TV, bucketSeed, bucketCount, &bucket); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> 16) & lomask); unsigned int loffset = loffsets[(prefixIndex << 2) + transitionTo]; unsigned int eoffset = eoffsets[(suffixIndex << 2) + transitionFrom]; unsigned int lmerOffset = lmerOffsets[tid]; for (int i = 0; i < lmerValue; i++) { ee[lmerOffset].eid = lmerOffset; ee[lmerOffset].v1 = prefixIndex; ee[lmerOffset].v2 = suffixIndex; ee[lmerOffset].s = lmerValues[lmerCount - 1] + lmerOffsets[lmerCount - 1]; l[loffset] = lmerOffset; e[eoffset] = lmerOffset; loffset++; eoffset++; lmerOffset++; } } } void verifySetupEdges(KEY_PTR d_lmerKeys, VALUE_PTR d_lmerValues, unsigned int * d_lmerOffsets, const unsigned int lmerCount, KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, const unsigned int bucketCount, unsigned int * d_l, unsigned int * d_e, EulerEdge * d_ee, unsigned int * d_lcount, unsigned int * d_loffsets, unsigned int * d_ecount, unsigned int * d_eoffsets, unsigned int kmerCount, unsigned int ecount, const KEY_T 
validBitMask) { KEY_PTR h_lmerKeys; VALUE_PTR h_lmerValues; unsigned int * h_lmerOffsets; KEY_PTR h_TK; VALUE_PTR h_TV; unsigned int * h_bucketSeed; unsigned int * h_l; unsigned int * h_e; EulerEdge * h_ee; unsigned int * h_loffsets; unsigned int * h_lcount; unsigned int * h_eoffsets; unsigned int * h_ecount; h_lmerKeys = (KEY_PTR) malloc(lmerCount * KEY_SIZE); h_lmerValues = (VALUE_PTR) malloc(lmerCount * VALUE_SIZE); h_lmerOffsets = (unsigned int *) malloc(lmerCount * sizeof(unsigned int)); h_TK = (KEY_PTR) malloc(bucketCount * BUCKET_KEY_SIZE); h_TV = (VALUE_PTR) malloc(bucketCount * BUCKET_VALUE_SIZE); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_l = (unsigned int *) malloc(ecount * sizeof(unsigned int)); h_e = (unsigned int *) malloc(ecount * sizeof(unsigned int)); h_ee = (EulerEdge *) malloc(ecount * sizeof(EulerEdge)); h_loffsets = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_eoffsets = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_lcount = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); checkCudaErrors( hipMemcpy(h_lmerKeys, d_lmerKeys, lmerCount * KEY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lmerValues, d_lmerValues, lmerCount * VALUE_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lmerOffsets, d_lmerOffsets, lmerCount * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_TK, d_TK, bucketCount * BUCKET_KEY_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_TV, d_TV, bucketCount * BUCKET_VALUE_SIZE, hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_bucketSeed, d_bucketSeed, bucketCount * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_ee, d_ee, ecount * sizeof(EulerEdge), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_loffsets, d_loffsets, kmerCount * 4 * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_eoffsets, d_eoffsets, kmerCount * 4 * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lcount, d_lcount, kmerCount * 4 * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_ecount, d_ecount, kmerCount * 4 * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_l, d_l, ecount * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_e, d_e, ecount * sizeof(unsigned int), hipMemcpyDeviceToHost)); printf(".....diff....\n"); for (unsigned int j = 0; j < (4 * kmerCount) - 1; j++) { if (h_lcount[j] != h_loffsets[j + 1] - h_loffsets[j]) { printf(" lcount mismatch j:[%u] lcount:[%u] diff:[%u]\n", j, h_lcount[j], h_loffsets[j + 1] - h_loffsets[j]); } if (h_ecount[j] != h_eoffsets[j + 1] - h_eoffsets[j]) { printf(" ecount mismatch j:[%u] ecount:[%u] diff:[%u]\n", j, h_ecount[j], h_eoffsets[j + 1] - h_eoffsets[j]); } } /* for(unsigned int k=0;k<4*kmerCount;k++){ printf("[%u]: loffset[%u] ,lcount[%u] ,eoffset[%u], ecount[%u]\n",k,h_loffsets[k],h_lcount[k],h_eoffsets[k],h_ecount[k]); } */ for (int i = 0; i < lmerCount; i++) { setupEdgesHost(h_lmerKeys, h_lmerValues, h_lmerOffsets, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_l, h_e, h_ee, h_loffsets, h_eoffsets, validBitMask, i); } free(h_lmerKeys); free(h_lmerValues); free(h_lmerOffsets); free(h_TK); free(h_TV); free(h_bucketSeed); free(h_l); free(h_e); free(h_ee); free(h_loffsets); 
free(h_eoffsets); free(h_lcount); free(h_ecount); } void verifyleOffsets(unsigned int * d_lOffsets, unsigned int * d_lcount, unsigned int * d_eOffsets, unsigned int * d_ecount, unsigned int length, unsigned int ecount) { unsigned int * h_lOffsets; unsigned int * h_eOffsets; unsigned int * h_lcount; unsigned int * h_ecount; h_lOffsets = (unsigned int*) malloc(length * sizeof(unsigned int)); h_eOffsets = (unsigned int *) malloc(length * sizeof(unsigned int)); h_lcount = (unsigned int*) malloc(length * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(length * sizeof(unsigned int)); checkCudaErrors( hipMemcpy(h_lOffsets, d_lOffsets, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_eOffsets, d_eOffsets, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_lcount, d_lcount, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_ecount, d_ecount, length * sizeof(unsigned int), hipMemcpyDeviceToHost)); unsigned int esum = 0; unsigned int lsum = 0; for (unsigned int t = 0; t < length; t++) { esum += h_ecount[t]; lsum += h_lcount[t]; } printf("esum : %u , lsum : %u \n", esum, lsum); unsigned int incorrectTotal = 0; for (unsigned int i = 0; i < length; i++) { if (h_lOffsets[i] > ecount || h_lOffsets[i] + h_lcount[i] > ecount) { incorrectTotal++; printf("incorrect l @ %u, value %u\n",i,h_lOffsets[i]); } if (h_eOffsets[i] > ecount || h_eOffsets[i] + h_ecount[i] > ecount) { incorrectTotal++; printf("incorrect e @ %u, value %u\n",i,h_eOffsets[i]); } } free(h_lOffsets); free(h_eOffsets); free(h_lcount); free(h_ecount); } extern "C" void constructDebruijnGraphGold(unsigned int * ecount, KEY_PTR h_lmerKeys, //in lmer keys VALUE_PTR h_lmerValues, //in lmer values unsigned int lmerCount, //in total lmers KEY_PTR h_kmerKeys, //in unsigned long kmerCount, //in total kmers unsigned int l, //in k KEY_PTR h_TK, VALUE_PTR h_TV, unsigned int * h_bucketSeed, unsigned int bucketCount, EulerVertex ** h_ev, //out unsigned int ** h_l, //out unsigned int ** h_e, //out EulerEdge ** h_ee //out ) { //out dim3 grid; dim3 block; unsigned int * h_lcount; unsigned int * h_lstart; unsigned int * h_ecount; unsigned int * h_estart; unsigned int * h_lmerOffsets; unsigned int memsize; KEY_T validBitMask = 0; //unsigned int timerGPU = 0; unsigned int k = l - 1; //cutilCheckError(cutCreateTimer(&timerGPU)); memsize = (kmerCount) * sizeof(unsigned int) * 4; // 4-tuple for each kmer h_lcount = (unsigned int *) malloc(memsize); h_lstart = (unsigned int *) malloc(memsize); h_estart = (unsigned int *) malloc(memsize); h_ecount = (unsigned int *) malloc(memsize); h_lmerOffsets = (unsigned int*) malloc(lmerCount * VALUE_SIZE); for (unsigned int i = 0; i < k * 2; i++) { validBitMask = (validBitMask << 1) | 1; } unsigned int bitCount = 0; KEY_T bit = 1; while (bit != 0) { if (bit & validBitMask) bitCount++; bit = bit << 1; } memset(h_lcount, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_ecount, 0, sizeof(unsigned int) * 4 * kmerCount); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); for (unsigned int tid = 0; tid < lmerCount; tid++) { debruijnCountHost(h_lmerKeys, h_lmerValues, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_lcount, h_ecount, validBitMask, bitCount, tid); } /* we need to perform pre-fix scan on , lcount, ecount, lmerValues, * lcount and ecount has equal number of elements ,4*kmercount * lmer has lmerCount elements, choose 
whichever is larger */ memset(h_lstart, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_estart, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_lmerOffsets, 0, sizeof(unsigned int) * lmerCount); prefixScan(h_lstart, h_lcount, 4 * kmerCount, false); prefixScan(h_estart, h_ecount, 4 * kmerCount, false); prefixScan(h_lmerOffsets, h_lmerValues, lmerCount, false); /* unsigned int buffer[2]; readData(buffer,d_lmerOffsets+lmerCount-1,1,sizeof(unsigned int)); readData(buffer+1,d_lmerValues+lmerCount-1,1,sizeof(unsigned int)); *ecount=buffer[0]+buffer[1]; */ *ecount = h_lmerOffsets[lmerCount - 1] + h_lmerValues[lmerCount - 1]; *h_ev = (EulerVertex *) malloc(sizeof(EulerVertex) * (kmerCount)); *h_l = (unsigned int *) malloc(sizeof(unsigned int) * (*ecount)); *h_e = (unsigned int *) malloc(sizeof(unsigned int) * (*ecount)); *h_ee = (EulerEdge *) malloc(sizeof(EulerEdge) * (*ecount)); memset(*h_e, 0, sizeof(unsigned int) * (*ecount)); memset(*h_l, 0, sizeof(unsigned int) * (*ecount)); // getOptimalLaunchConfiguration(kmerCount,&grid,&block); for (unsigned int tid = 0; tid < kmerCount; tid++) { setupVerticesHost(h_kmerKeys, kmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, *h_ev, h_lcount, h_lstart, h_ecount, h_estart, tid); } //getOptimalLaunchConfiguration(lmerCount,&grid,&block); for (unsigned int tid = 0; tid < lmerCount; tid++) { setupEdgesHost(h_lmerKeys, h_lmerValues, h_lmerOffsets, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, *h_l, *h_e, *h_ee, h_lstart, h_estart, validBitMask, tid); } free(h_lmerOffsets); free(h_lcount); free(h_lstart); free(h_estart); free(h_ecount); } //extern "C" void constructDebruijnGraphDevice(unsigned int * ecount, KEY_PTR d_lmerKeys, //in lmer keys VALUE_PTR d_lmerValues, //in lmer values unsigned int lmerCount, //in total lmers KEY_PTR d_kmerKeys, //in unsigned long kmerCount, //in total kmers unsigned int l, //in k KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, unsigned int bucketCount, EulerVertex ** d_ev, //out unsigned int ** d_l, //out unsigned int ** d_e, //out EulerEdge ** d_ee //out ) { //out dim3 grid; dim3 block; unsigned int * d_lcount; unsigned int * d_lstart; unsigned int * d_ecount; unsigned int * d_estart; unsigned int * d_lmerOffsets; unsigned int mem_size; KEY_T validBitMask = 0; //unsigned int timerGPU = 0; unsigned int k = l - 1; //cutilCheckError(cutCreateTimer(&timerGPU)); mem_size = (kmerCount) * sizeof(unsigned int) * 4; // 4-tuple for each kmer allocateMemory((void**) &d_lcount, mem_size); allocateMemory((void**) &d_lstart, mem_size); allocateMemory((void**) &d_estart, mem_size); allocateMemory((void**) &d_ecount, mem_size); allocateMemory((void**) &d_lmerOffsets, lmerCount * VALUE_SIZE); for (unsigned int i = 0; i < k * 2; i++) { validBitMask = (validBitMask << 1) | 1; } logMessage(LOG_LVL_DETAIL,"deb bit mask %lu\n",validBitMask); logMessage(LOG_LVL_DETAIL, "kernel: debruijnCount"); getOptimalLaunchConfiguration(lmerCount, &grid, &block); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); hipLaunchKernelGGL(( debruijnCount), dim3(grid),dim3(block), 0, 0, d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask); CheckCUDAError(); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); /* we need to perform pre-fix scan on , lcount, ecount, lmerValues, * lcount and ecount has equal number of elements ,4*kmercount 
* lmer has lmerCount elements, choose whichever is larger */ // unsigned int maxElements=(lmerCount>4*kmerCount)?lmerCount:4*kmerCount; CUDPPConfiguration configKmer; configKmer.op = CUDPP_ADD; configKmer.datatype = CUDPP_UINT; configKmer.algorithm = CUDPP_SCAN; configKmer.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplanKmer = 0; cudppPlan(&scanplanKmer, configKmer, 4 * kmerCount, 1, 0); CheckCUDAError(); cudppScan(scanplanKmer, d_lstart, d_lcount, 4 * kmerCount); cudppScan(scanplanKmer, d_estart, d_ecount, 4 * kmerCount); cudppDestroyPlan(scanplanKmer); CUDPPConfiguration configLmer; configLmer.op = CUDPP_ADD; configLmer.datatype = CUDPP_UINT; configLmer.algorithm = CUDPP_SCAN; configLmer.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplanLmer = 0; CUDPPResult result = cudppPlan(&scanplanLmer, configLmer, lmerCount, 1, 0); CheckCUDAError(); cudppScan(scanplanLmer, d_lmerOffsets, d_lmerValues, lmerCount); cudppDestroyPlan(scanplanLmer); //validatePrefixScan(d_lstart,d_lcount,4*kmerCount,false); //validatePrefixScan(d_estart,d_ecount,4*kmerCount,false); //validatePrefixScan(d_lmerOffsets,d_lmerValues,lmerCount,false); unsigned int buffer[2]; readData(buffer, d_lmerOffsets + lmerCount - 1, 1, sizeof(unsigned int)); readData(buffer + 1, d_lmerValues + lmerCount - 1, 1, sizeof(unsigned int)); *ecount = buffer[0] + buffer[1]; logMessage(LOG_LVL_MSG, "debruijn vertex count:%d \ndebruijn edge count:%d", kmerCount, *ecount); allocateMemory((void**) d_ev, sizeof(EulerVertex) * (kmerCount)); allocateMemory((void**) d_l, sizeof(unsigned int) * (*ecount)); allocateMemory((void**) d_e, sizeof(unsigned int) * (*ecount)); allocateMemory((void**) d_ee, sizeof(EulerEdge) * (*ecount)); CheckCUDAError(); hipMemset(*d_e, 0, sizeof(unsigned int) * (*ecount)); hipMemset(*d_l, 0, sizeof(unsigned int) * (*ecount)); CheckCUDAError(); logMessage(LOG_LVL_DETAIL, "kernel: setupVertices"); getOptimalLaunchConfiguration(kmerCount, &grid, &block); //cutilCheckError(cutStartTimer(timerGPU)); hipLaunchKernelGGL(( setupVertices), dim3(grid),dim3(block), 0, 0, d_kmerKeys,kmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,*d_ev,d_lcount,d_lstart,d_ecount,d_estart); CheckCUDAError(); ///*DEBUG*/verifyleOffsets(d_lstart,d_lcount,d_estart,d_ecount,4*kmerCount,*ecount); getOptimalLaunchConfiguration(lmerCount, &grid, &block); //verifySetupEdges(d_lmerKeys,d_lmerValues,d_lmerOffsets,lmerCount, d_TK,d_TV,d_bucketSeed,bucketCount,*d_l,*d_e,*d_ee,d_lcount,d_lstart,d_ecount,d_estart,kmerCount,*ecount,validBitMask); logMessage(LOG_LVL_DETAIL,"kernel: setupEdges"); hipLaunchKernelGGL(( setupEdges), dim3(grid),dim3(block), 0, 0, d_lmerKeys,d_lmerValues,d_lmerOffsets,lmerCount, d_TK,d_TV,d_bucketSeed,bucketCount,*d_l,*d_e,*d_ee,d_lstart,d_estart,validBitMask); CheckCUDAError(); //cutilCheckError(cutStopTimer(timerGPU)); //logMessage(LOG_LVL_MSG,"CPU Time : %f",cutGetTimerValue(timerGPU)); //constructDebruijnGold( d_idata, d_icount, kmerCount,kmerLength,totalVertices,validBitMask); //printDebruijnGraph(*d_ev, kmerCount, *d_l, *d_e, *d_ee, *ecount, k, 0); // may not need it //printDebruijnGraph(*d_ev,kmerCount,*d_l,*d_e,*d_ee,*ecount,k,1); //printData(*d_ev,*vcount); //printData(*d_ee,*ecount); //cutilCheckError(cutDeleteTimer(timerGPU)); deallocateMemory(d_lmerOffsets); deallocateMemory(d_lcount); deallocateMemory(d_lstart); deallocateMemory(d_estart); deallocateMemory(d_ecount); }
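/*
 * A tiny host-side sketch (illustrative only) of what the exclusive CUDPP scan
 * above computes for d_lmerOffsets: given the per-lmer multiplicities
 * (lmerValues), the exclusive prefix sum gives each lmer its starting slot in
 * the edge array, and the total edge count is the last offset plus the last
 * multiplicity -- the same quantity constructDebruijnGraphDevice reads back
 * into buffer[0] and buffer[1]. The multiplicities below are made up.
 */
#include <cstdio>
#include <vector>

int main()
{
    std::vector<unsigned int> values = { 3, 1, 4, 2 };     // lmer multiplicities
    std::vector<unsigned int> offsets(values.size(), 0);

    // Exclusive scan: offsets[i] = sum of values[0..i-1], offsets[0] = 0.
    for (std::size_t i = 1; i < values.size(); ++i)
        offsets[i] = offsets[i - 1] + values[i - 1];

    unsigned int ecount = offsets.back() + values.back();  // 8 + 2 = 10 edges
    for (std::size_t i = 0; i < offsets.size(); ++i)
        std::printf("lmer %zu -> edge slots [%u, %u)\n",
                    i, offsets[i], offsets[i] + values[i]);
    std::printf("total edges: %u\n", ecount);
    return 0;
}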
814713048e48789c790fe439e7aaec5731013fab.cu
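/*
 * Overview of this file (the CUDA counterpart of the hipified file above):
 * constructDebruijnGraphDevice drives four stages -- debruijnCount tallies the
 * per-vertex out/in edge multiplicities into lcount/ecount, CUDPP exclusive
 * scans turn those tallies and the l-mer multiplicities into offsets,
 * setupVertices fills the EulerVertex records, and setupEdges expands every
 * l-mer into its EulerEdge records plus the l[]/e[] incidence arrays.
 * constructDebruijnGraphGold is the equivalent CPU reference path.
 */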
#define CUDPP_STATIC_LIB // #include "../nvidia_sdk/C/common/inc/cutil_inline.h" #include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h" // lib above replaced w/this one at CUDA 5.0 #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdio.h> /* defines printf for tests */ #include <time.h> /* defines time_t for timings in the test */ #include <math.h> #ifdef linux # include <endian.h> /* attempt to define endianness */ #endif //#include "debruijn.h" #include "kmer.h" #include "graph.h" #include "cudpp.h" #include "utils.h" #include "common.h" #include "gpuhash.h" #include "gpuhash_device.h" //#include "utils.cpp" #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ __BYTE_ORDER == __LITTLE_ENDIAN) || \ (defined(i386) || defined(__i386__) || defined(__i486__) || \ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ __BYTE_ORDER == __BIG_ENDIAN) || \ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #else # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 0 #endif /* unsigned int _host_hash_h(KEY_T key, unsigned int bucketCount){ return ((C0+C1*key)% LARGE_PRIME )% bucketCount; } VALUE_T getHashValue2Host(KEY_T key, TABLE_PTR T,unsigned int *bucketSize, unsigned int bucketCount){ unsigned int bucket=_host_hash_h(key,bucketCount); unsigned int l=0; unsigned int r=bucketSize[bucket]; unsigned int offset=bucket * BUCKET_SIZE; unsigned int mid=(l+r)>>1; while(l<r){ mid =l+((r-l)/2); if( T[offset+(mid<<1)] <key) { l=mid+1; }else { r=mid; } } if(l < bucketSize[bucket] && T[offset+(l<<1)]==key){ return T[offset+(l<<1)+1]; }else { return MAX_INT; } } */ /*** * Inline Printing Routine for l and e structures */ inline void printData(unsigned int * d_lstart, unsigned int * d_lcount, unsigned int * d_estart, unsigned int * d_ecount, unsigned int length) { unsigned int * h_lstart; unsigned int * h_lcount; unsigned int * h_estart; unsigned int * h_ecount; h_lstart = (unsigned int *) malloc(sizeof(unsigned int) * length); h_lcount = (unsigned int *) malloc(sizeof(unsigned int) * length); h_estart = (unsigned int *) malloc(sizeof(unsigned int) * length); h_ecount = (unsigned int *) malloc(sizeof(unsigned int) * length); checkCudaErrors( cudaMemcpy(h_lstart, d_lstart, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lcount, d_lcount, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_estart, d_estart, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_ecount, d_ecount, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (unsigned int i = 0; i < length; i++) { printf("[%4u]\t es:%4u\t ec:%4u\t ls:%4u\t lc:%4u\n", i, h_estart[i], h_ecount[i], h_lstart[i], h_lcount[i]); } free(h_lstart); free(h_lcount); free(h_estart); free(h_ecount); } //__global__ void debruijnCount(unsigned int k,unsigned long kmerCount, unsigned char * idata,unsigned int * icount,unsigned int * vcount, unsigned int * lcount,unsigned int * ecount,unsigned int validBitMask){ /* * This kernel works on each l-mer ,counting edges of the graph. 
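 *
 * Layout of the two count arrays (taken from the index math below): every
 * k-mer vertex owns four consecutive slots, one per 2-bit nucleotide, so
 *   lcount[(prefixIndex << 2) + transitionTo]   holds the out-edge multiplicity of the prefix vertex, and
 *   ecount[(suffixIndex << 2) + transitionFrom] holds the in-edge multiplicity of the suffix vertex,
 * with prefix = (lmer & (validBitMask << 2)) >> 2, suffix = lmer & validBitMask,
 * transitionTo = lmer & 3 and transitionFrom = (lmer >> __popcll(validBitMask)) & 3,
 * i.e. a shift by 2*k.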
*/ __global__ void debruijnCount(KEY_PTR lmerKeys, /* lmer keys */ VALUE_PTR lmerValues, /* lmer frequency */ unsigned int lmerCount, /* total lmers */ KEY_PTR TK, /* Keys' pointer for Hash table*/ VALUE_PTR TV, /* Value pointer for Hash table*/ unsigned int * bucketSeed, /* bucketSize: size of each bucket (it should be renamed to bucketSize)*/ unsigned int bucketCount, /* total buckets */ unsigned int * lcount, /* leaving edge count array : OUT */ unsigned int * ecount, /* entering edge count array: OUT */ KEY_T validBitMask /* bit mask for K length encoded bits*/ ) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; VALUE_T prefixIndex = getHashValue(prefix, TK, TV, bucketSeed, bucketCount); VALUE_T suffixIndex = getHashValue(suffix, TK, TV, bucketSeed, bucketCount); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> __popcll(validBitMask)) & lomask); //atomicAdd(lcount+(prefixIndex<<2 )+transition,lmerValue); //atomicAdd(ecount+(suffixIndex<<2)+transition,lmerValue); lcount[(prefixIndex << 2) + transitionTo] = lmerValue; ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } } /** * This is cpu version for same kernel. for Debugging purpose only */ void debruijnCountHost(KEY_PTR lmerKeys, VALUE_PTR lmerValues, unsigned int lmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, unsigned int * lcount, unsigned int * ecount, KEY_T validBitMask, unsigned int bitCount, unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; unsigned int b; VALUE_T prefixIndex = host_getHashValue(prefix, TK, TV, bucketSeed, bucketCount, &b); VALUE_T suffixIndex = host_getHashValue(suffix, TK, TV, bucketSeed, bucketCount, &b); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> bitCount) & lomask); //atomicAdd(lcount+(prefixIndex<<2 )+transition,lmerValue); //atomicAdd(ecount+(suffixIndex<<2)+transition,lmerValue); if (lcount[(prefixIndex << 2) + transitionTo] > 0) { lcount[(prefixIndex << 2) + transitionTo] = lmerValue; } else { lcount[(prefixIndex << 2) + transitionTo] = lmerValue; } if (ecount[(suffixIndex << 2) + transitionFrom] > 0) { ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } else { ecount[(suffixIndex << 2) + transitionFrom] = lmerValue; } } } /* * stub for debruijnCountHost for debugging purpose */ void verifyDebruijnCountHost(KEY_PTR d_lmerKeys, VALUE_PTR d_lmerValues, unsigned int lmerCount, KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, unsigned int bucketCount, unsigned int * d_lcount, unsigned int * d_ecount, KEY_T validBitMask, unsigned int kmerCount) { KEY_PTR h_lmerKeys; VALUE_PTR h_lmerValues; KEY_PTR h_TK; VALUE_PTR h_TV; unsigned int * h_bucketSeed; unsigned int * h_lcount; unsigned int * h_ecount; unsigned int * hq_lcount; unsigned int * hq_ecount; h_lmerKeys = (KEY_PTR) malloc(lmerCount * sizeof(KEY_T)); h_lmerValues = (VALUE_PTR) malloc(lmerCount * sizeof(VALUE_T)); h_TK = (KEY_PTR) 
malloc(bucketCount * BUCKET_KEY_SIZE); h_TV = (VALUE_PTR) malloc(bucketCount * BUCKET_VALUE_SIZE); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_lcount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); hq_lcount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); hq_ecount = (unsigned int *) malloc(4 * kmerCount * sizeof(unsigned int)); checkCudaErrors( cudaMemcpy(h_lmerKeys, d_lmerKeys, lmerCount * KEY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lmerValues, d_lmerValues, lmerCount * VALUE_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_TK, d_TK, bucketCount * BUCKET_KEY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_TV, d_TV, bucketCount * BUCKET_VALUE_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_bucketSeed, d_bucketSeed, bucketCount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(hq_lcount, d_lcount, 4 * kmerCount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(hq_ecount, d_ecount, 4 * kmerCount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); memset(h_lcount, 0, 4 * kmerCount * sizeof(unsigned int)); memset(h_ecount, 0, 4 * kmerCount * sizeof(unsigned int)); unsigned int bitCount = 0; KEY_T bit = 1; while (bit != 0) { if (bit & validBitMask) bitCount++; bit = bit << 1; } unsigned int edgesCount = 0; for (unsigned int i = 0; i < lmerCount; i++) { debruijnCountHost(h_lmerKeys, h_lmerValues, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_lcount, h_ecount, validBitMask, bitCount, i); edgesCount += h_lmerValues[i]; } unsigned int esum = 0; unsigned int qesum = 0; unsigned int lsum = 0; unsigned int qlsum = 0; unsigned int ei = 0; unsigned int li = 0; unsigned int qei = 0; unsigned int qli = 0; for (int j = 0; j < 4 * kmerCount; j++) { esum += h_ecount[j]; lsum += h_lcount[j]; qesum += hq_ecount[j]; qlsum += hq_lcount[j]; if (esum > edgesCount && ei < 1) ei = j; if (lsum > edgesCount && li < 1) li = j; if (qesum > edgesCount && qei < 1) qei = j; if (qlsum > edgesCount && qli < 1) qli = j; } printf( "lmerCount: %u, esum: %u, lsum: %u, ei: %u li:%u \n qesum:%u , qlsum:%u \n", edgesCount, esum, lsum, ei, li, qesum, qlsum); unsigned int enc = 0; unsigned int lnc = 0; for (unsigned int k = 0; k < 4 * kmerCount; k++) { if (h_lcount[k] != hq_lcount[k]) lnc++; if (h_ecount[k] != hq_ecount[k]) enc++; } printf("enc: %u, lnc:%u \n", enc, lnc); free(h_lmerValues); free(h_TK); free(h_TV); free(h_bucketSeed); free(h_lcount); free(h_ecount); free(hq_lcount); free(hq_ecount); } /** * CPU prefix scan */ void prefixScan(unsigned int * h_out, unsigned int * h_in, unsigned int length, bool inclusive) { memset(h_out, 0, length * sizeof(unsigned int)); /*calculate gold*/ if (inclusive) { h_out[0] = h_in[0]; } else { h_out[0] = 0; } for (unsigned int i = 1; i < length; i++) { h_out[i] = h_out[i - 1] + h_in[i - (inclusive ? 
0 : 1)]; } } /* * prefix sum validator **/ void validatePrefixScan(unsigned int * d_output, unsigned int * d_input, unsigned int length, bool inclusive) { unsigned int * h_input; unsigned int * h_output; unsigned int * hq_output; h_input = (unsigned int *) malloc(length * sizeof(unsigned int)); h_output = (unsigned int *) malloc(length * sizeof(unsigned int)); hq_output = (unsigned int *) malloc(length * sizeof(unsigned int)); checkCudaErrors( cudaMemcpy(h_input, d_input, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(hq_output, d_output, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); /* memset(h_output,0,length*sizeof(unsigned int)); if(inclusive) { h_output[0]=h_input[0];} else { h_output[0]=0; } for(unsigned int i=1;i<length;i++){ h_output[i]=h_output[i-1]+h_input[i-(inclusive?0:1)]; } */ prefixScan(h_output, h_input, length, inclusive); /*compare*/ for (unsigned int j = 0; j < length; j++) { if (h_output[j] != hq_output[j]) { printf("differnce at index:%u is gold:%u, cudpp\n", j, h_output[j], hq_output[j]); } } free(h_input); free(h_output); free(hq_output); } /* * This kernel works on a k-mer (l-1mer) which are vertices of the graph. */ __global__ void setupVertices(KEY_PTR kmerKeys, unsigned int kmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, EulerVertex * ev, unsigned int * lcount, unsigned int * loffset, unsigned int * ecount, unsigned int * eoffset) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < kmerCount) { KEY_T key = kmerKeys[tid]; VALUE_T index = getHashValue(key, TK, TV, bucketSeed, bucketCount); ; ev[index].vid = key; ev[index].lp = loffset[(index << 2)]; ev[index].lcount = lcount[(index << 2)] + lcount[(index << 2) + 1] + lcount[(index << 2) + 2] + lcount[(index << 2) + 3]; ev[index].ep = eoffset[(index << 2)]; ev[index].ecount = ecount[(index << 2)] + ecount[(index << 2) + 1] + ecount[(index << 2) + 2] + ecount[(index << 2) + 3]; } } void setupVerticesHost(KEY_PTR kmerKeys, unsigned int kmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, unsigned int bucketCount, EulerVertex * ev, unsigned int * lcount, unsigned int * loffset, unsigned int * ecount, unsigned int * eoffset, unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < kmerCount) { KEY_T key = kmerKeys[tid]; unsigned int bucket; VALUE_T index = host_getHashValue(key, TK, TV, bucketSeed, bucketCount, &bucket); ; ev[index].vid = key; ev[index].lp = loffset[(index << 2)]; ev[index].lcount = lcount[(index << 2)] + lcount[(index << 2) + 1] + lcount[(index << 2) + 2] + lcount[(index << 2) + 3]; ev[index].ep = eoffset[(index << 2)]; ev[index].ecount = ecount[(index << 2)] + ecount[(index << 2) + 1] + ecount[(index << 2) + 2] + ecount[(index << 2) + 3]; } } /* * This kernel works on an l-mer, which represents an edge * in the debruijn Graph. 
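 *
 * Each l-mer with multiplicity lmerValue is expanded into lmerValue edge
 * records starting at lmerOffsets[tid] (the exclusive scan of lmerValues).
 * Every edge id is also written into l[] inside the prefix vertex's out-edge
 * window (loffsets) and into e[] inside the suffix vertex's in-edge window
 * (eoffsets), so that EulerVertex.lp/lcount and ep/ecount describe contiguous
 * ranges of edge ids in l[] and e[].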
*/ __global__ void setupEdges( KEY_PTR lmerKeys, VALUE_PTR lmerValues, unsigned int * lmerOffsets, const unsigned int lmerCount, KEY_PTR TK, VALUE_PTR TV, unsigned int * bucketSeed, const unsigned int bucketCount, unsigned int * l, unsigned int * e, EulerEdge * ee, unsigned int * loffsets, unsigned int * eoffsets, const KEY_T validBitMask) { unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; //prefix and suffix index must be less than kmer count VALUE_T prefixIndex = getHashValue(prefix, TK, TV, bucketSeed, bucketCount); VALUE_T suffixIndex = getHashValue(suffix, TK, TV, bucketSeed, bucketCount); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> __popcll(validBitMask)) & lomask); unsigned int loffset = loffsets[(prefixIndex << 2) + transitionTo]; unsigned int eoffset = eoffsets[(suffixIndex << 2) + transitionFrom]; unsigned int lmerOffset = lmerOffsets[tid]; for (unsigned int i = 0; i < lmerValue; i++) { ee[lmerOffset].eid =lmerOffset; ee[lmerOffset].v1 = prefixIndex; ee[lmerOffset].v2 = suffixIndex; // lmerOffset; ee[lmerOffset].s = lmerValues[lmerCount - 1] + lmerOffsets[lmerCount - 1]; l[loffset] = lmerOffset; e[eoffset] = lmerOffset; loffset++; eoffset++; lmerOffset++; } } } void setupEdgesHost(KEY_PTR const lmerKeys, VALUE_PTR const lmerValues, unsigned int * const lmerOffsets, const unsigned int lmerCount, KEY_PTR const TK, VALUE_PTR const TV, unsigned int * const bucketSeed, const unsigned int bucketCount, unsigned int * const l, unsigned int * const e, EulerEdge * const ee, unsigned int * const loffsets, unsigned int * const eoffsets, const KEY_T validBitMask, const unsigned int tid) { // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if (tid < lmerCount) { KEY_T lmer = lmerKeys[tid]; VALUE_T lmerValue = lmerValues[tid]; KEY_T prefix = (lmer & (validBitMask << 2)) >> 2; KEY_T suffix = (lmer & validBitMask); KEY_T lomask = 3; unsigned int bucket; //prefix and suffix index must be less than kmer count VALUE_T prefixIndex = host_getHashValue(prefix, TK, TV, bucketSeed, bucketCount, &bucket); VALUE_T suffixIndex = host_getHashValue(suffix, TK, TV, bucketSeed, bucketCount, &bucket); KEY_T transitionTo = (lmer & lomask); KEY_T transitionFrom = ((lmer >> 16) & lomask); unsigned int loffset = loffsets[(prefixIndex << 2) + transitionTo]; unsigned int eoffset = eoffsets[(suffixIndex << 2) + transitionFrom]; unsigned int lmerOffset = lmerOffsets[tid]; for (int i = 0; i < lmerValue; i++) { ee[lmerOffset].eid = lmerOffset; ee[lmerOffset].v1 = prefixIndex; ee[lmerOffset].v2 = suffixIndex; ee[lmerOffset].s = lmerValues[lmerCount - 1] + lmerOffsets[lmerCount - 1]; l[loffset] = lmerOffset; e[eoffset] = lmerOffset; loffset++; eoffset++; lmerOffset++; } } } void verifySetupEdges(KEY_PTR d_lmerKeys, VALUE_PTR d_lmerValues, unsigned int * d_lmerOffsets, const unsigned int lmerCount, KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, const unsigned int bucketCount, unsigned int * d_l, unsigned int * d_e, EulerEdge * d_ee, unsigned int * d_lcount, unsigned int * d_loffsets, unsigned int * d_ecount, unsigned int * d_eoffsets, unsigned int kmerCount, unsigned int ecount, const KEY_T 
validBitMask) { KEY_PTR h_lmerKeys; VALUE_PTR h_lmerValues; unsigned int * h_lmerOffsets; KEY_PTR h_TK; VALUE_PTR h_TV; unsigned int * h_bucketSeed; unsigned int * h_l; unsigned int * h_e; EulerEdge * h_ee; unsigned int * h_loffsets; unsigned int * h_lcount; unsigned int * h_eoffsets; unsigned int * h_ecount; h_lmerKeys = (KEY_PTR) malloc(lmerCount * KEY_SIZE); h_lmerValues = (VALUE_PTR) malloc(lmerCount * VALUE_SIZE); h_lmerOffsets = (unsigned int *) malloc(lmerCount * sizeof(unsigned int)); h_TK = (KEY_PTR) malloc(bucketCount * BUCKET_KEY_SIZE); h_TV = (VALUE_PTR) malloc(bucketCount * BUCKET_VALUE_SIZE); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_bucketSeed = (unsigned int *) malloc(bucketCount * sizeof(unsigned int)); h_l = (unsigned int *) malloc(ecount * sizeof(unsigned int)); h_e = (unsigned int *) malloc(ecount * sizeof(unsigned int)); h_ee = (EulerEdge *) malloc(ecount * sizeof(EulerEdge)); h_loffsets = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_eoffsets = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_lcount = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(kmerCount * 4 * sizeof(unsigned int)); checkCudaErrors( cudaMemcpy(h_lmerKeys, d_lmerKeys, lmerCount * KEY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lmerValues, d_lmerValues, lmerCount * VALUE_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lmerOffsets, d_lmerOffsets, lmerCount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_TK, d_TK, bucketCount * BUCKET_KEY_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_TV, d_TV, bucketCount * BUCKET_VALUE_SIZE, cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_bucketSeed, d_bucketSeed, bucketCount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_ee, d_ee, ecount * sizeof(EulerEdge), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_loffsets, d_loffsets, kmerCount * 4 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_eoffsets, d_eoffsets, kmerCount * 4 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lcount, d_lcount, kmerCount * 4 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_ecount, d_ecount, kmerCount * 4 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_l, d_l, ecount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_e, d_e, ecount * sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf(".....diff....\n"); for (unsigned int j = 0; j < (4 * kmerCount) - 1; j++) { if (h_lcount[j] != h_loffsets[j + 1] - h_loffsets[j]) { printf(" lcount mismatch j:[%u] lcount:[%u] diff:[%u]\n", j, h_lcount[j], h_loffsets[j + 1] - h_loffsets[j]); } if (h_ecount[j] != h_eoffsets[j + 1] - h_eoffsets[j]) { printf(" ecount mismatch j:[%u] ecount:[%u] diff:[%u]\n", j, h_ecount[j], h_eoffsets[j + 1] - h_eoffsets[j]); } } /* for(unsigned int k=0;k<4*kmerCount;k++){ printf("[%u]: loffset[%u] ,lcount[%u] ,eoffset[%u], ecount[%u]\n",k,h_loffsets[k],h_lcount[k],h_eoffsets[k],h_ecount[k]); } */ for (int i = 0; i < lmerCount; i++) { setupEdgesHost(h_lmerKeys, h_lmerValues, h_lmerOffsets, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_l, h_e, h_ee, h_loffsets, h_eoffsets, validBitMask, i); } free(h_lmerKeys); free(h_lmerValues); free(h_lmerOffsets); free(h_TK); free(h_TV); free(h_bucketSeed); free(h_l); free(h_e); 
free(h_ee); free(h_loffsets); free(h_eoffsets); free(h_lcount); free(h_ecount); } void verifyleOffsets(unsigned int * d_lOffsets, unsigned int * d_lcount, unsigned int * d_eOffsets, unsigned int * d_ecount, unsigned int length, unsigned int ecount) { unsigned int * h_lOffsets; unsigned int * h_eOffsets; unsigned int * h_lcount; unsigned int * h_ecount; h_lOffsets = (unsigned int*) malloc(length * sizeof(unsigned int)); h_eOffsets = (unsigned int *) malloc(length * sizeof(unsigned int)); h_lcount = (unsigned int*) malloc(length * sizeof(unsigned int)); h_ecount = (unsigned int *) malloc(length * sizeof(unsigned int)); checkCudaErrors( cudaMemcpy(h_lOffsets, d_lOffsets, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_eOffsets, d_eOffsets, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_lcount, d_lcount, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_ecount, d_ecount, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); unsigned int esum = 0; unsigned int lsum = 0; for (unsigned int t = 0; t < length; t++) { esum += h_ecount[t]; lsum += h_lcount[t]; } printf("esum : %u , lsum : %u \n", esum, lsum); unsigned int incorrectTotal = 0; for (unsigned int i = 0; i < length; i++) { if (h_lOffsets[i] > ecount || h_lOffsets[i] + h_lcount[i] > ecount) { incorrectTotal++; printf("incorrect l @ %u, value %u\n",i,h_lOffsets[i]); } if (h_eOffsets[i] > ecount || h_eOffsets[i] + h_ecount[i] > ecount) { incorrectTotal++; printf("incorrect e @ %u, value %u\n",i,h_eOffsets[i]); } } free(h_lOffsets); free(h_eOffsets); free(h_lcount); free(h_ecount); } extern "C" void constructDebruijnGraphGold(unsigned int * ecount, KEY_PTR h_lmerKeys, //in lmer keys VALUE_PTR h_lmerValues, //in lmer values unsigned int lmerCount, //in total lmers KEY_PTR h_kmerKeys, //in unsigned long kmerCount, //in total kmers unsigned int l, //in k KEY_PTR h_TK, VALUE_PTR h_TV, unsigned int * h_bucketSeed, unsigned int bucketCount, EulerVertex ** h_ev, //out unsigned int ** h_l, //out unsigned int ** h_e, //out EulerEdge ** h_ee //out ) { //out dim3 grid; dim3 block; unsigned int * h_lcount; unsigned int * h_lstart; unsigned int * h_ecount; unsigned int * h_estart; unsigned int * h_lmerOffsets; unsigned int memsize; KEY_T validBitMask = 0; //unsigned int timerGPU = 0; unsigned int k = l - 1; //cutilCheckError(cutCreateTimer(&timerGPU)); memsize = (kmerCount) * sizeof(unsigned int) * 4; // 4-tuple for each kmer h_lcount = (unsigned int *) malloc(memsize); h_lstart = (unsigned int *) malloc(memsize); h_estart = (unsigned int *) malloc(memsize); h_ecount = (unsigned int *) malloc(memsize); h_lmerOffsets = (unsigned int*) malloc(lmerCount * VALUE_SIZE); for (unsigned int i = 0; i < k * 2; i++) { validBitMask = (validBitMask << 1) | 1; } unsigned int bitCount = 0; KEY_T bit = 1; while (bit != 0) { if (bit & validBitMask) bitCount++; bit = bit << 1; } memset(h_lcount, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_ecount, 0, sizeof(unsigned int) * 4 * kmerCount); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); for (unsigned int tid = 0; tid < lmerCount; tid++) { debruijnCountHost(h_lmerKeys, h_lmerValues, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, h_lcount, h_ecount, validBitMask, bitCount, tid); } /* we need to perform pre-fix scan on , lcount, ecount, lmerValues, * lcount and ecount has equal number of elements ,4*kmercount * 
lmer has lmerCount elements, choose whichever is larger */ memset(h_lstart, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_estart, 0, sizeof(unsigned int) * 4 * kmerCount); memset(h_lmerOffsets, 0, sizeof(unsigned int) * lmerCount); prefixScan(h_lstart, h_lcount, 4 * kmerCount, false); prefixScan(h_estart, h_ecount, 4 * kmerCount, false); prefixScan(h_lmerOffsets, h_lmerValues, lmerCount, false); /* unsigned int buffer[2]; readData(buffer,d_lmerOffsets+lmerCount-1,1,sizeof(unsigned int)); readData(buffer+1,d_lmerValues+lmerCount-1,1,sizeof(unsigned int)); *ecount=buffer[0]+buffer[1]; */ *ecount = h_lmerOffsets[lmerCount - 1] + h_lmerValues[lmerCount - 1]; *h_ev = (EulerVertex *) malloc(sizeof(EulerVertex) * (kmerCount)); *h_l = (unsigned int *) malloc(sizeof(unsigned int) * (*ecount)); *h_e = (unsigned int *) malloc(sizeof(unsigned int) * (*ecount)); *h_ee = (EulerEdge *) malloc(sizeof(EulerEdge) * (*ecount)); memset(*h_e, 0, sizeof(unsigned int) * (*ecount)); memset(*h_l, 0, sizeof(unsigned int) * (*ecount)); // getOptimalLaunchConfiguration(kmerCount,&grid,&block); for (unsigned int tid = 0; tid < kmerCount; tid++) { setupVerticesHost(h_kmerKeys, kmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, *h_ev, h_lcount, h_lstart, h_ecount, h_estart, tid); } //getOptimalLaunchConfiguration(lmerCount,&grid,&block); for (unsigned int tid = 0; tid < lmerCount; tid++) { setupEdgesHost(h_lmerKeys, h_lmerValues, h_lmerOffsets, lmerCount, h_TK, h_TV, h_bucketSeed, bucketCount, *h_l, *h_e, *h_ee, h_lstart, h_estart, validBitMask, tid); } free(h_lmerOffsets); free(h_lcount); free(h_lstart); free(h_estart); free(h_ecount); } //extern "C" void constructDebruijnGraphDevice(unsigned int * ecount, KEY_PTR d_lmerKeys, //in lmer keys VALUE_PTR d_lmerValues, //in lmer values unsigned int lmerCount, //in total lmers KEY_PTR d_kmerKeys, //in unsigned long kmerCount, //in total kmers unsigned int l, //in k KEY_PTR d_TK, VALUE_PTR d_TV, unsigned int * d_bucketSeed, unsigned int bucketCount, EulerVertex ** d_ev, //out unsigned int ** d_l, //out unsigned int ** d_e, //out EulerEdge ** d_ee //out ) { //out dim3 grid; dim3 block; unsigned int * d_lcount; unsigned int * d_lstart; unsigned int * d_ecount; unsigned int * d_estart; unsigned int * d_lmerOffsets; unsigned int mem_size; KEY_T validBitMask = 0; //unsigned int timerGPU = 0; unsigned int k = l - 1; //cutilCheckError(cutCreateTimer(&timerGPU)); mem_size = (kmerCount) * sizeof(unsigned int) * 4; // 4-tuple for each kmer allocateMemory((void**) &d_lcount, mem_size); allocateMemory((void**) &d_lstart, mem_size); allocateMemory((void**) &d_estart, mem_size); allocateMemory((void**) &d_ecount, mem_size); allocateMemory((void**) &d_lmerOffsets, lmerCount * VALUE_SIZE); for (unsigned int i = 0; i < k * 2; i++) { validBitMask = (validBitMask << 1) | 1; } logMessage(LOG_LVL_DETAIL,"deb bit mask %lu\n",validBitMask); logMessage(LOG_LVL_DETAIL, "kernel: debruijnCount"); getOptimalLaunchConfiguration(lmerCount, &grid, &block); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); debruijnCount<<<grid,block>>>(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask); CheckCUDAError(); //verifyDebruijnCountHost(d_lmerKeys,d_lmerValues,lmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,d_lcount,d_ecount,validBitMask,kmerCount); /* we need to perform pre-fix scan on , lcount, ecount, lmerValues, * lcount and ecount has equal number of elements ,4*kmercount 
* lmer has lmerCount elements, choose whichever is larger */ // unsigned int maxElements=(lmerCount>4*kmerCount)?lmerCount:4*kmerCount; CUDPPConfiguration configKmer; configKmer.op = CUDPP_ADD; configKmer.datatype = CUDPP_UINT; configKmer.algorithm = CUDPP_SCAN; configKmer.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplanKmer = 0; cudppPlan(&scanplanKmer, configKmer, 4 * kmerCount, 1, 0); CheckCUDAError(); cudppScan(scanplanKmer, d_lstart, d_lcount, 4 * kmerCount); cudppScan(scanplanKmer, d_estart, d_ecount, 4 * kmerCount); cudppDestroyPlan(scanplanKmer); CUDPPConfiguration configLmer; configLmer.op = CUDPP_ADD; configLmer.datatype = CUDPP_UINT; configLmer.algorithm = CUDPP_SCAN; configLmer.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplanLmer = 0; CUDPPResult result = cudppPlan(&scanplanLmer, configLmer, lmerCount, 1, 0); CheckCUDAError(); cudppScan(scanplanLmer, d_lmerOffsets, d_lmerValues, lmerCount); cudppDestroyPlan(scanplanLmer); //validatePrefixScan(d_lstart,d_lcount,4*kmerCount,false); //validatePrefixScan(d_estart,d_ecount,4*kmerCount,false); //validatePrefixScan(d_lmerOffsets,d_lmerValues,lmerCount,false); unsigned int buffer[2]; readData(buffer, d_lmerOffsets + lmerCount - 1, 1, sizeof(unsigned int)); readData(buffer + 1, d_lmerValues + lmerCount - 1, 1, sizeof(unsigned int)); *ecount = buffer[0] + buffer[1]; logMessage(LOG_LVL_MSG, "debruijn vertex count:%d \ndebruijn edge count:%d", kmerCount, *ecount); allocateMemory((void**) d_ev, sizeof(EulerVertex) * (kmerCount)); allocateMemory((void**) d_l, sizeof(unsigned int) * (*ecount)); allocateMemory((void**) d_e, sizeof(unsigned int) * (*ecount)); allocateMemory((void**) d_ee, sizeof(EulerEdge) * (*ecount)); CheckCUDAError(); cudaMemset(*d_e, 0, sizeof(unsigned int) * (*ecount)); cudaMemset(*d_l, 0, sizeof(unsigned int) * (*ecount)); CheckCUDAError(); logMessage(LOG_LVL_DETAIL, "kernel: setupVertices"); getOptimalLaunchConfiguration(kmerCount, &grid, &block); //cutilCheckError(cutStartTimer(timerGPU)); setupVertices<<<grid,block>>>(d_kmerKeys,kmerCount,d_TK,d_TV,d_bucketSeed,bucketCount,*d_ev,d_lcount,d_lstart,d_ecount,d_estart); CheckCUDAError(); ///*DEBUG*/verifyleOffsets(d_lstart,d_lcount,d_estart,d_ecount,4*kmerCount,*ecount); getOptimalLaunchConfiguration(lmerCount, &grid, &block); //verifySetupEdges(d_lmerKeys,d_lmerValues,d_lmerOffsets,lmerCount, d_TK,d_TV,d_bucketSeed,bucketCount,*d_l,*d_e,*d_ee,d_lcount,d_lstart,d_ecount,d_estart,kmerCount,*ecount,validBitMask); logMessage(LOG_LVL_DETAIL,"kernel: setupEdges"); setupEdges<<<grid,block>>>(d_lmerKeys,d_lmerValues,d_lmerOffsets,lmerCount, d_TK,d_TV,d_bucketSeed,bucketCount,*d_l,*d_e,*d_ee,d_lstart,d_estart,validBitMask); CheckCUDAError(); //cutilCheckError(cutStopTimer(timerGPU)); //logMessage(LOG_LVL_MSG,"CPU Time : %f",cutGetTimerValue(timerGPU)); //constructDebruijnGold( d_idata, d_icount, kmerCount,kmerLength,totalVertices,validBitMask); //printDebruijnGraph(*d_ev, kmerCount, *d_l, *d_e, *d_ee, *ecount, k, 0); // may not need it //printDebruijnGraph(*d_ev,kmerCount,*d_l,*d_e,*d_ee,*ecount,k,1); //printData(*d_ev,*vcount); //printData(*d_ee,*ecount); //cutilCheckError(cutDeleteTimer(timerGPU)); deallocateMemory(d_lmerOffsets); deallocateMemory(d_lcount); deallocateMemory(d_lstart); deallocateMemory(d_estart); deallocateMemory(d_ecount); }
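/*
 * Documentation-only sketch (the helper name is not part of the original code):
 * with the exclusive scans configured above (CUDPP_OPTION_EXCLUSIVE),
 * offsets[i] holds the sum of values[0..i-1], so the total edge count is the
 * last offset plus the last value. This is exactly what the two readData()
 * calls above reconstruct from device memory, and what
 * constructDebruijnGraphGold computes directly on the host.
 */
static inline unsigned int totalFromExclusiveScan(const unsigned int * offsets,
                                                  const unsigned int * values,
                                                  unsigned int n) {
    return (n == 0) ? 0 : offsets[n - 1] + values[n - 1];
}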
059e1461a788fdd30e04eeab66f78666fe640436.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <assert.h> #include "../../nvmatrix/include/nvmatrix_kernels.cuh" #include "../include/conv_util.cuh" using namespace std; __device__ inline float square(const float a) { return a * a; } /* * Horizontal reflection. * imgs: (numColors, imgSize, imgSize, numCases) * targets: (numColors, imgSize, imgSize, numCases) * * targets should be a different array from imgs. * * Block size: (4, 32) * blockIdx.y * 4 + threadIdx.y determines pixel * blockIdx.x * 32 * imgsPerThread + threadIdx.x determines case batch * */ template<int numColors, int imgsPerThread, bool checkCaseBounds> __global__ void kReflectH(float * imgs, float * targets, const int imgSize, const int numCases) { const int pxIdx = blockIdx.y * 4 + threadIdx.y; const int imgPixels = imgSize * imgSize; if (pxIdx < imgPixels) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdxY = pxIdx / imgSize; const int pxIdxX = pxIdx % imgSize; const int pxIdxXR = imgSize - 1 - pxIdxX; // reflected coordinate const int pxIdxR = pxIdxY * imgSize + pxIdxXR; imgs += pxIdx * numCases + caseIdx; targets += pxIdxR * numCases + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numCases) { #pragma unroll for (int c = 0; c < numColors; ++c) { targets[c * imgPixels * numCases + i * 32] = imgs[c * imgPixels * numCases + i * 32]; } } } } } /* * Horizontal reflection. * imgs: (numColors, imgSize, imgSize, numCases) * targets: (numColors, imgSize, imgSize, numCases) */ void convReflectHorizontal(THCudaTensor* images, THCudaTensor* targets, int imgSize) { int numCases = images->size[1]; int imgPixels = imgSize * imgSize; int numColors = images->size[0] / imgPixels; THAssert(numColors * imgPixels == images->size[0]); THAssert(numColors > 0 && numColors <= 3); THCudaTensor_resizeAs(targets, images); int imgsPerThread = numCases % 128 == 0 ? 4 : numCases % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numCases % (32 * imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numCases, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (checkCaseBounds) { if (numColors == 1) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<1, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 2) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<2, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<2, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<2, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 3) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<3, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<3, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<3, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } } else { if (numColors == 1) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<1, 4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 2) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<2, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 1, false>), dim3(blocks), dim3(threads), 
0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<2, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<2, 4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<2, 4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 3) { if (imgsPerThread == 1) { hipFuncSetCacheConfig(kReflectH<3, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { hipFuncSetCacheConfig(kReflectH<3, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { hipFuncSetCacheConfig(kReflectH<3, 4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kReflectH<3, 4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } } getLastCudaError("kReflectH: kernel execution failed"); } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(THCudaTensor* weights, int numModules, float norm) { int numFilters = weights->size[1]; int weightsPerFilter = weights->size[0] / numModules; THAssert(numModules * weightsPerFilter == weights->size[0]); THAssert(THCudaTensor_isContiguous(weights)); THAssert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else { hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [ 0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [ 0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + 
caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. 
But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. * Also, it will in practice be a tiny fraction of the runtime of a large convnet. * * So that is my justification for being lazy here. */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
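 *
 * Implementation note: the kernel keeps a rolling buffer outputs[0..2*radius-1]
 * of partial sums and slides it along one row (horiz) or one column (!horiz),
 * so every input pixel is read once per pass; a horizontal pass followed by a
 * vertical one composes the separable 2-D blur. The host wrapper below
 * instantiates radius 1-4 (filter widths 3, 5, 7, 9), so the "3, 5, 7, 9" list
 * above appears to describe the filter width rather than the radius. shFilter
 * is indexed up to 2*radius-1, so a shared array of filterWidth-1 elements
 * (rather than radius) seems to be intended.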
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(THCudaTensor* images, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target->size[1] : images->size[1]; int imgPixels = imgSize * imgSize; THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(target)); THAssert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { THAssert(target->size[0] == numChannels * outputs); } else { THAssert(images->size[0] == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { THCudaTensor_resize2d(images, numChannels * imgPixels, numImages); THCudaTensor_fill(images, 0); } else { THCudaTensor_resize2d(target, numChannels*outputs, numImages); } } else { if (reverse) { THAssert(images->size[0] == numChannels * outputs); THAssert(images->size[1] == numImages); } else { THAssert(target->size[0] == numChannels * outputs); THAssert(target->size[1] == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, 
numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(THCudaTensor* images, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(THCudaTensor* actsGrad, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(THCudaTensor* images, THCudaTensor* filter, THCudaTensor* target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images->size[1]; int radius = filter->size[1] / 2; int imgPixels = images->size[0] / numChannels; int imgSize = int(sqrt(imgPixels)); THAssert(imgPixels == imgSize * imgSize); THAssert(radius >= 1 && radius <= 4); THAssert(imgSize >= 2 * radius + 1); THAssert(filter->size[0] == 1); THAssert(images->size[0] == numChannels * imgPixels); THAssert(THCudaTensor_isContiguous(target)); if (scaleTargets == 0) { THCudaTensor_resizeAs(target, images); } else { THAssert(THCudaTensor_isSameSizeAs(target, images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, 
images->stride[0], horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs); } else if (radius == 4) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = minDiv + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; 
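            /*
             * Write-out step: denom = minDiv + addScale * sum_window(meanDiff^2) and
             * target = img * denom^(-powScale), i.e. the usual local response /
             * contrast normalization form.
             */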
target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines image idx in batches of B_X*imgsPerThread * blockIdx.y determines filter idx in batches of B_Y*filtersPerThread * blockIdx.z determines pixel * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) { const int imgPixels = imgSize * imgSize; const int pxIdxX = blockIdx.z % imgSize; const int pxIdxY = blockIdx.z / imgSize; const int blockImgIdx = blockIdx.x * B_X * imgsPerThread; const int blockFilterIdx = blockIdx.y * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = max(0, startPxY); const int loopStartX = max(0, startPxX); const int loopEndY = min(imgSize, startPxY + sizeX); const int loopEndX = min(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[f][i] = minDiv + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. 
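 *
 * (Concretely: each block covers the 4x4 pixel tile whose top-left corner is
 *  (4*(blockIdx.x / numImgBlocks), 4*(blockIdx.y / numFilterBlocks)), and the 16
 *  thread rows map one-to-one onto the 16 pixels of that tile.)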
* * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i 
< imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = minDiv + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(hipTextureObject_t imgs, hipTextureObject_t meanDiffs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale, const float minDiv) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; const int meanDiffsOffset = pxIdx * numImages + imgIdx; // imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; // meanDiffs += pxIdx * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? 
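    /*
     * Cross-map window: in `blocked` mode the window is the aligned group of sizeF
     * filters that contains filterIdx; otherwise it is centred on filterIdx
     * (startF = filterIdx - sizeF/2), clamped here to [0, numFilters).
     * For example, sizeF = 5 and filterIdx = 7 in non-blocked mode (with numFilters
     * large enough) gives the window of filters 5..9.
     */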
startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(tex1Dfetch<float>(meanDiffs, meanDiffsOffset + f * imgPixels * numImages + i * B_X)); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = minDiv + addScale * prod[i]; target[i * B_X] = tex1Dfetch<float>(imgs, imgOffset + i * B_X) * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numOutputs, imgPixels, numImages) * maxActs: (numOutputs, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds> __global__ void kCrossMapMaxPoolUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int startF, const int poolSize, const int numOutputs, const int stride, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); // const int numOutputs = DIVUP(numFilters, stride); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; maxGrads += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx; maxActs += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } if (filterIdx < numFilters) { // const int startOut = max(0, (filterIdx-startF-poolSize)/ stride + 1); const int loopStartOut = max(0, (filterIdx-startF-poolSize)/ stride + 1); const int loopEndOut = min(numOutputs, (filterIdx - startF)/ stride + 1); for (int o = loopStartOut; o < loopEndOut; ++o) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float ma = maxActs[o * imgPixels * numImages + i * B_X]; const float mg = maxGrads[o * imgPixels * numImages + i * B_X]; const float img = imgs[i*B_X]; prod[i] += (img == ma) * mg; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { target[i * B_X] = scaleTargets * target[i * 
B_X] + scaleOutputs * prod[i]; } } } } } /* * images: (numFilters, imgPixels, numImages) * maxGrads: (numOutputs, imgPixels, numImages) * maxActs: (numOutputs, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) */ void convCrossMapMaxPoolUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, const int imgSize, const int startF, const int poolSize, const int stride, const float scaleTargets, const float scaleOutputs) { int numImages = images->size[1]; int imgPixels = imgSize * imgSize; int numFilters = images->size[0] / imgPixels; int numOutputs = maxActs->size[0] / imgPixels; THAssert(images->size[0] == numFilters * imgPixels); THAssert(maxGrads->size[0] == numOutputs * imgPixels); THAssert(maxGrads->size[1] == numImages); THAssert(THCudaTensor_isSameSizeAs(maxGrads, maxActs)); THAssert(images->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(maxGrads)); THAssert(THCudaTensor_isContiguous(maxActs)); THAssert(THCudaTensor_isSameSizeAs(maxGrads, maxActs)); // THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(stride <= poolSize); THAssert(startF <= 0); THAssert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered dim3 threads(32, 4); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numFilters, threads.y)); bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0; if (scaleTargets == 0) { THCudaTensor_resizeAs(target, images); if (!checkCaseBounds) { if (imgsPerThread == 4) { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else if (imgsPerThread == 2) { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { THAssert(THCudaTensor_isSameSizeAs(target, images)); if (!checkCaseBounds) { if (imgsPerThread == 4) { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else if (imgsPerThread == 2) { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 2, true, false>), dim3(blocks), 
dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } getLastCudaError("convCrossMapMaxPoolUndo: kernel execution failed"); } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(hipTextureObject_t outGrads, hipTextureObject_t denoms, hipTextureObject_t inputs, hipTextureObject_t acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; const int actsOffset = pxIdx * numImages + imgIdx; const int inputOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += inputOffset; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? 
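    /*
     * Gradient pass: startF = filterIdx - (sizeF - sizeF/2 - 1), so the loop visits
     * every output filter f whose forward window [f - sizeF/2, f - sizeF/2 + sizeF)
     * (see kFCNorm) contained filterIdx; the range is clamped to [0, numFilters) below.
     */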
startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += tex1Dfetch<float>(acts, actsOffset + f * imgPixels * numImages + i * B_X); } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X); const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X); prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X); const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X); prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this is pretty wasteful of computation. a lot of threads basically compute the same products. */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> //__launch_bounds__(128,16) __global__ void kFRNormUndo2(hipTextureObject_t outGrads, hipTextureObject_t inputs, hipTextureObject_t acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale, const float minDiv, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; const int inpOffset = pxIdx * numImages + imgIdx; const int outOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += outOffset; float prod[imgsPerThread]; float denoms[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; denoms[i] = 0; } int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; int loopStartF = blocked ? 
startF : MAX(0, startF); int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { // If an input is zero, then we shuldn't divide by it. const float grad = tex1Dfetch<float>(outGrads, inpOffset + f * imgPixels * numImages + i * B_X); const float act = tex1Dfetch<float>(acts, inpOffset + f * imgPixels * numImages + i * B_X); const float inp = tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X) + (act == 0); prod[i] += grad * act * __powf(__fdividef(act, inp), 1.0f/powScale); } } } startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; loopStartF = blocked ? startF : MAX(0, startF); loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { denoms[i] += square(tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X)); } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X); denoms[i] = addScale * denoms[i] + minDiv; prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale)); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X); denoms[i] = addScale * denoms[i] + minDiv; prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale)); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 
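    /*
     * startOutputY/endOutputY (and the X counterparts below) bound the set of pooling
     * windows that overlap this image pixel. For example, with startX = 0, strideX = 2,
     * subsX = 3 (and outputsX large enough), pixel row 5 is covered only by pooling
     * row my = 2: startOutputY = 1 + (5 - 3)/2 = 2 and endOutputY = 1 + 5/2 = 3.
     */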
0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { const float regionStartY = fmaxf(0, startX + my * strideX); const float regionEndY = fminf(imgSize, startX + my * strideX + subsX); const float regionSizeY = regionEndY - regionStartY; for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; const float regionStartX = fmaxf(0, startX + mx * strideX); const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX); const float regionSizeX = regionEndX - regionStartX; // It's important to do the division here, because pushing division into the below // loops makes the code 4x slower. const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
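 *
 * (The gradient for an input pixel is accumulated from every pooling window that
 *  contains it, and only where this input equalled the pooled maximum, which is what
 *  the (img == ma) * mg term in the loop below implements.)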
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; 
i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, hipTextureObject_t denoms, hipTextureObject_t outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale * tex1Dfetch<float>(outGrads, i + k * B_X) * acts[i + k * B_X], tex1Dfetch<float>(denoms, i + k * B_X)); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx 
+ i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (scaleTargets == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. 
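 *
 * (Presumably `acts` has been rescaled first by kRNormUndoPrelims above
 *  (acts := -2 x scale x acts x outGrads / denoms); the window sum accumulated below is
 *  then the chain-rule term through the denominators, and the final write adds
 *  outGrads * denoms^(-powScale).)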
*/ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = 
inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[1]; int numFilters = maxGrads->size[0] / outputs; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(maxGrads->size[0] == numFilters * outputs); THAssert(maxGrads->size[1] == numImages); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(maxGrads)); THAssert(THCudaTensor_isContiguous(maxActs)); THAssert(THCudaTensor_isSameSizeAs(maxGrads, maxActs)); THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(strideX <= subsX); THCudaTensor_resizeAs(target, images); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
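    /*
     * Dispatch heuristic: 4 images per thread when the batch size is a multiple of 128,
     * 2 when a multiple of 64, otherwise 1; checkCaseBounds below enables bounds checks
     * whenever numImages is not a multiple of 32 * imgsPerThread.
     */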
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), 
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } getLastCudaError("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(THCudaTensor* avgGrads, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(THCudaTensor* avgGrads, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads->size[1]; int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads->size[0] / outputs; THAssert(avgGrads->size[0] == numFilters * outputs); THAssert(THCudaTensor_isContiguous(avgGrads)); THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(strideX <= subsX); THCudaTensor_resize2d(target, numFilters * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
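    /*
     * Same images-per-thread heuristic as convLocalMaxUndo above; in addition each block
     * here covers 4 * 4 = 16 filters (B_Y = 4, filtersPerThread = 4), which is why
     * numFilters must be a multiple of 16.
     */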
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(avgGrads), 
THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } getLastCudaError("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(THCudaTensor* images, THCudaTensor* denoms, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale, minDiv); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(THCudaTensor* images, THCudaTensor* meanDiffs, THCudaTensor* denoms, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) { int numImages = images->size[1]; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(THCudaTensor_isSameSizeAs(meanDiffs, images)); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(meanDiffs)); THAssert(numFilters % 16 == 0 || numFilters <= 8); THCudaTensor_resizeAs(target, images); THCudaTensor_resizeAs(denoms, images); THAssert(THCudaTensor_isContiguous(target)); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; THAssert((imgsPerThread * bx) % 32 == 0); THAssert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 5) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 6) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 7) { if 
(checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 8) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,threads.x*4), (numFilters / (threads.y * 2)), imgPixels); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } } } getLastCudaError("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(THCudaTensor* outGrads, THCudaTensor* denoms, THCudaTensor* meanDiffs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. 
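 * (The preliminary kernel launch below rewrites acts in place as -2*addScale*powScale * acts * outGrads / denoms; copy acts beforehand if it is needed again.)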
*/ void convResponseNormUndo(THCudaTensor* outGrads, THCudaTensor* denoms, THCudaTensor* inputs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[1]; int imgPixels = outGrads->size[0] / numFilters; int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(outGrads->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isSameSizeAs(denoms, outGrads)); THAssert(THCudaTensor_isSameSizeAs(acts, denoms)); THAssert(THCudaTensor_isContiguous(outGrads)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, outGrads); THAssert(THCudaTensor_isContiguous(target)); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 8; dim3 threads(128); dim3 blocks(DIVUP(THCudaTensor_nElement(outGrads),(threads.x * prelimEltsPerThread))); bool checkPrelimBounds = THCudaTensor_nElement(outGrads) % (threads.x * prelimEltsPerThread) != 0; //printf("num elts: %d, blocks: %d\n", outGrads.getNumElements(), blocks.x); hipTextureObject_t texDenoms = THCudaTensor_getTextureObject(denoms); hipTextureObject_t texOutGrads = THCudaTensor_getTextureObject(outGrads); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 8>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(acts), texDenoms, texOutGrads, THCudaTensor_nElement(outGrads), -2*addScale*powScale); checkCudaErrors(hipDestroyTextureObject(texDenoms)); checkCudaErrors(hipDestroyTextureObject(texOutGrads)); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) // NOTE: this stuff is not optimized for Kepler. Only kRNormUndo is. int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; THAssert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), 
THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 128 == 0 ? 
4 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } getLastCudaError("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(THCudaTensor* images, THCudaTensor* target, int imgSize, int tgtSize, float scale) { int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = images->size[0] / imgPixels; int numImages = images->size[1]; THAssert(images->size[0] == numChannels * imgPixels); THCudaTensor_resize2d(target, numChannels * tgtPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } getLastCudaError("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(THCudaTensor* images, THCudaTensor* target) { int imgPixels = images->size[0] / 3; int numImages = images->size[1]; THAssert(images->size[0] == 3 * imgPixels); THCudaTensor_resize2d(target, 3 * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } getLastCudaError("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(THCudaTensor* images, THCudaTensor* target, bool center) { int imgPixels = images->size[0] / 3; int numImages = images->size[1]; THAssert(images->size[0] == 3 * imgPixels); THCudaTensor_resize2d(target, 3 * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } else { if (center) { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } 
getLastCudaError("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ void convCrop(THCudaTensor* imgs, THCudaTensor* target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs->size[1]; int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = imgs->size[0] / imgPixels; THAssert(imgs->size[0] == imgPixels * numChannels); THAssert(imgPixels == imgSize * imgSize); THAssert(imgSize - startY >= tgtSize); THAssert(imgSize - startX >= tgtSize); THAssert(startY >= 0); THAssert(startX >= 0); THCudaTensor_resize2d(target, numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } getLastCudaError("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. 
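 * (convResponseNormCrossMap further below is exactly that shortcut: it passes images for meanDiffs.)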
*/ void convContrastNormCrossMap(THCudaTensor* images, THCudaTensor* meanDiffs, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) { int numImages = images->size[1]; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(THCudaTensor_isSameSizeAs(meanDiffs, images)); THAssert(sizeF > 0 && sizeF <= numFilters); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(meanDiffs)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, images); // THCudaTensor_resizeAs(denoms, images); THAssert(THCudaTensor_isContiguous(target)); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); // printf("convContrastNormCrossMap imgs: %p, meanDiffs: %p, denoms: %p, target: %p, imgSize: %d, numFilters: %d, numImages: %d, sizeF: %d, addScale: %f, powScale: %f, minDiv: %f, blocked: %d\n", // THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, blocked); hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texMeanDiffs = THCudaTensor_getTextureObject(meanDiffs); if (blocked) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } else { hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } } checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texMeanDiffs)); getLastCudaError("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. 
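 * (Note: unlike convResponseNormUndo above, this cross-map variant does not take a denoms argument.)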
*/ void convResponseNormCrossMapUndo(THCudaTensor* outGrads, THCudaTensor* inputs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[1]; int imgPixels = outGrads->size[0] / numFilters; int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(sizeF > 0 && sizeF <= numFilters); THAssert(outGrads->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isContiguous(outGrads)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, outGrads); THAssert(THCudaTensor_isContiguous(target)); dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; hipTextureObject_t texOutGrads = THCudaTensor_getTextureObject(outGrads); hipTextureObject_t texInputs = THCudaTensor_getTextureObject(inputs); hipTextureObject_t texActs = THCudaTensor_getTextureObject(acts); if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, true, false>), dim3(blocks2), 
dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, false, false>), dim3(blocks2), dim3(threads2), 0, 0, texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } } checkCudaErrors(hipDestroyTextureObject(texOutGrads)); checkCudaErrors(hipDestroyTextureObject(texInputs)); checkCudaErrors(hipDestroyTextureObject(texActs)); getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(THCudaTensor* images, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) { convContrastNormCrossMap(images, images, target, numFilters, sizeF, addScale, powScale, minDiv, blocked); } /* * images: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convResponseNormCrossMap(THCudaTensor* images, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, target, numFilters, sizeF, addScale, powScale, 1, blocked); } void convLocalMaxPool(THCudaTensor* images, THCudaTensor* target, int numFilters, int subsX, int startX, int strideX, int outputsX) { MaxPooler pooler; convLocalPool(images, target, numFilters, subsX, startX, strideX, outputsX, pooler); } void convLocalAvgPool(THCudaTensor* images, THCudaTensor* target, int numFilters, int subsX, int startX, int strideX, int outputsX) { AvgPooler pooler; convLocalPool(images, target, numFilters, subsX, startX, strideX, outputsX, pooler); } void convCrossMapMaxPool(THCudaTensor* images, THCudaTensor* target, const int startF, const int poolSize, const int numOutputs, const int stride, const int imgSize) { MaxPooler pooler; convPoolCrossMap(images, target, startF, poolSize, numOutputs, stride, imgSize, pooler); }
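/*
 * Illustrative usage sketch (not part of the original API): the helper below pairs the
 * average-pooling forward pass with its gradient routine defined above. The function
 * name, the 2x2 window, and the stride-2 configuration are assumptions made for the
 * example. Shapes follow the conventions used throughout this file: images is
 * (numFilters * imgSize * imgSize, numImages); numFilters must be a multiple of 16
 * (see the THAssert in convLocalAvgUndo), and imgSize is assumed divisible by strideX here.
 */
static void exampleLocalAvgPoolRoundTrip(THCudaTensor* images, THCudaTensor* pooled,
                                         THCudaTensor* pooledGrads, THCudaTensor* imageGrads,
                                         int numFilters, int imgSize) {
    int subsX = 2, startX = 0, strideX = 2;   // 2x2 windows, stride 2, no padding
    int outputsX = imgSize / strideX;         // pooled grid size per image side
    // Forward pass: average-pool every filter map of every image.
    convLocalAvgPool(images, pooled, numFilters, subsX, startX, strideX, outputsX);
    // Backward pass: spread each pooled gradient uniformly over its subsX*subsX window.
    convLocalAvgUndo(pooledGrads, imageGrads, subsX, startX, strideX, outputsX, imgSize);
}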
// 059e1461a788fdd30e04eeab66f78666fe640436.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <assert.h> #include "../../nvmatrix/include/nvmatrix_kernels.cuh" #include "../include/conv_util.cuh" using namespace std; __device__ inline float square(const float a) { return a * a; } /* * Horizontal reflection. * imgs: (numColors, imgSize, imgSize, numCases) * targets: (numColors, imgSize, imgSize, numCases) * * targets should be a different array from imgs. * * Block size: (4, 32) * blockIdx.y * 4 + threadIdx.y determines pixel * blockIdx.x * 32 * imgsPerThread + threadIdx.x determines case batch * */ template<int numColors, int imgsPerThread, bool checkCaseBounds> __global__ void kReflectH(float * imgs, float * targets, const int imgSize, const int numCases) { const int pxIdx = blockIdx.y * 4 + threadIdx.y; const int imgPixels = imgSize * imgSize; if (pxIdx < imgPixels) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdxY = pxIdx / imgSize; const int pxIdxX = pxIdx % imgSize; const int pxIdxXR = imgSize - 1 - pxIdxX; // reflected coordinate const int pxIdxR = pxIdxY * imgSize + pxIdxXR; imgs += pxIdx * numCases + caseIdx; targets += pxIdxR * numCases + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numCases) { #pragma unroll for (int c = 0; c < numColors; ++c) { targets[c * imgPixels * numCases + i * 32] = imgs[c * imgPixels * numCases + i * 32]; } } } } } /* * Horizontal reflection. * imgs: (numColors, imgSize, imgSize, numCases) * targets: (numColors, imgSize, imgSize, numCases) */ void convReflectHorizontal(THCudaTensor* images, THCudaTensor* targets, int imgSize) { int numCases = images->size[1]; int imgPixels = imgSize * imgSize; int numColors = images->size[0] / imgPixels; THAssert(numColors * imgPixels == images->size[0]); THAssert(numColors > 0 && numColors <= 3); THCudaTensor_resizeAs(targets, images); int imgsPerThread = numCases % 128 == 0 ? 4 : numCases % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numCases % (32 * imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numCases, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (checkCaseBounds) { if (numColors == 1) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<1, 1, true>, cudaFuncCachePreferL1); kReflectH<1, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<1, 2, true>, cudaFuncCachePreferL1); kReflectH<1, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<1, 4, true>, cudaFuncCachePreferL1); kReflectH<1, 4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 2) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<2, 1, true>, cudaFuncCachePreferL1); kReflectH<2, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<2, 2, true>, cudaFuncCachePreferL1); kReflectH<2, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<2, 4, true>, cudaFuncCachePreferL1); kReflectH<2, 4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 3) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<3, 1, true>, cudaFuncCachePreferL1); kReflectH<3, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<3, 2, true>, cudaFuncCachePreferL1); kReflectH<3, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<3, 4, true>, cudaFuncCachePreferL1); kReflectH<3, 4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } } else { if (numColors == 1) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<1, 1, false>, cudaFuncCachePreferL1); kReflectH<1, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<1, 2, false>, cudaFuncCachePreferL1); kReflectH<1, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<1, 4, false>, cudaFuncCachePreferL1); kReflectH<1, 4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 2) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<2, 1, false>, cudaFuncCachePreferL1); kReflectH<2, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<2, 2, false>, cudaFuncCachePreferL1); kReflectH<2, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<2, 4, false>, cudaFuncCachePreferL1); 
kReflectH<2, 4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } else if (numColors == 3) { if (imgsPerThread == 1) { cudaFuncSetCacheConfig(kReflectH<3, 1, false>, cudaFuncCachePreferL1); kReflectH<3, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 2) { cudaFuncSetCacheConfig(kReflectH<3, 2, false>, cudaFuncCachePreferL1); kReflectH<3, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } else if (imgsPerThread == 4) { cudaFuncSetCacheConfig(kReflectH<3, 4, false>, cudaFuncCachePreferL1); kReflectH<3, 4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(targets), imgSize, numCases); } } } getLastCudaError("kReflectH: kernel execution failed"); } /* * blockIdx.y determines module in batches of B_Y * blockIdx.x determines filter in batches of B_X * filtersPerThread * * weights: (numModules, numColors, filterPixels, numFilters) * Not fully coalesced if B_X < 32, so use cache. */ template <int B_Y, int B_X, int filtersPerThread> __global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) { const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y; const uint filterIdx = B_X * blockIdx.x + threadIdx.x; float prod[filtersPerThread]; #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = 0; } if (moduleIdx < numModules) { weights += moduleIdx * weightsPerFilter * numFilters + filterIdx; for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] += square(weights[p * numFilters + i * B_X]); } } #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { prod[i] = sqrtf(prod[i]); prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f; } for (uint p = 0; p < weightsPerFilter; ++p) { #pragma unroll for (uint i = 0; i < filtersPerThread; ++i) { weights[p * numFilters + i * B_X] *= prod[i]; } } } } /* * weights: (numModules, numColors, filterPixels, numFilters) */ void normalizeLocalWeights(THCudaTensor* weights, int numModules, float norm) { int numFilters = weights->size[1]; int weightsPerFilter = weights->size[0] / numModules; THAssert(numModules * weightsPerFilter == weights->size[0]); THAssert(THCudaTensor_isContiguous(weights)); THAssert(numFilters % 16 == 0); int bx = numFilters % 32 == 0 ? 32 : 16; int by = bx == 32 ? 4 : 8; int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 
2 : 1; dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by)); dim3 threads(bx, by); if (filtersPerThread == 4) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 4><<<blocks, threads, 0>>>(THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else if (filtersPerThread == 2) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 2><<<blocks, threads, 0>>>(THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<4, 32, 1><<<blocks, threads, 0>>>(THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } else { cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1); kNormalizeLCWeights<8, 16, 1><<<blocks, threads, 0>>>(THCudaTensor_data(weights), numFilters, numModules, weightsPerFilter, norm); } } } /* * Block size 4x32 * blockIdx.x determines img idx in batches of 32*imgsPerThread * blockIdx.y determines channel idx, pixel idx in batches of 4 * * threadIdx.x determins case idx * threadIdx.y determines pixel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride, const uint imgSize, const uint tgtSize, const uint startY, const uint startX) { const uint imgPixels = imgSize * imgSize; const uint tgtPixels = tgtSize * tgtSize; const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4); const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y; const uint tgtPxY = tgtPixelIdx / tgtSize; const uint tgtPxX = tgtPixelIdx % tgtSize; const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX; if (tgtPixelIdx < tgtPixels) { imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx; target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx; #pragma unroll for (uint i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) { target[i * 32] = imgs[i * 32]; } } } } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * Each thread produces (y,u,v) values for a particular (r,g,b) pixel * * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV): * * [Y] [ 0.2126 0.7152 0.0722 ][R] * [U] = [-0.09991 -0.33609 0.436 ][G] * [V] [ 0.615 -0.55861 -0.05639][B] */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const 
float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V } } } } __device__ inline float labf(const float x) { if (x > 0.0088564517f) { return __powf(x, 0.3333f); } return 7.787037f * x + 0.13793103f; } /* * Block size 4x32 * blockIdx.y determines pixel idx in batches of 4 * blockIdx.x determines case idx in batches of 32*imgsPerThread * threadIdx.y determines pixel idx * threadIdx.x determines case idx * * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) * * This proceeds in two steps. * * - First, RGB values are linearly transformed to XYZ as per * http://en.wikipedia.org/wiki/CIE_XYZ_color_space * - Second, XYZ values are nonlinearly transformed to L*a*b* as per * http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation * * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel * * The RGB --> XYZ transform is: * * [X] [0.49 0.31 0.2 ][R] * [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G] * [Z] [0 0.01 0.99 ][B] * * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand. * * Then X_max, Y_max, Z_max = 5.6506753. * * The range of the L* values is [0, 100]. * If the center flag is given, the range will be [-50, 50]. * */ template <int imgsPerThread, bool checkCaseBounds, bool center> __global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) { const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int pxIdx = blockIdx.y * 4 + threadIdx.y; if (pxIdx < imgPixels) { const int imgChannelStride = imgPixels * imgStride; const int tgtChannelStride = imgPixels * numImages; imgs += pxIdx * imgStride + caseIdx; target += pxIdx * numImages + caseIdx; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { if (!checkCaseBounds || caseIdx + i * 32 < numImages) { const float R = imgs[0 * imgChannelStride + i * 32]; const float G = imgs[1 * imgChannelStride + i * 32]; const float B = imgs[2 * imgChannelStride + i * 32]; const float X = (0.49f * R + 0.31f * G + 0.2f * B); const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B); const float Z = (0.01f * G + 0.99f * B); const float labX = labf(X); const float labY = labf(Y); const float labZ = labf(Z); target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L* target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a* target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b* } } } } /* * Block size 16x32. * Each block produces a 4x4 chunk of the output image. * threadIdx.y determines pixel idx in 4x4 chunk. * threadIdx.x determines case idx. * blockIdx.x determines case idx in batches of 32*imgsPerThread. * blockIdx.y determines 4x4 chunk idx, channel idx. * * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize (roughly) * * This is a rather naive kernel that relies on cache for speed. But all it's doing * is basic texture manipulation, which is very local in nature, so it should be ok. * Also, it will in practice be a tiny fraction of the runtime of a large convnet. 
* * So that is my justification for being lazy here. */ template <int imgsPerThread, bool checkCaseBounds> __global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize, const int numImages, const int imgStride, const float scale, const float centerScale) { const int numChunksX = DIVUP(tgtSize, 4); const int numChunks = numChunksX * numChunksX; const int channelIdx = blockIdx.y / numChunks; const int chunkIdx = blockIdx.y % numChunks; const int chunkIdxX = chunkIdx % numChunksX; const int chunkIdxY = chunkIdx / numChunksX; const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int pxX = 4 * chunkIdxX + threadIdx.y % 4; const int pxY = 4 * chunkIdxY + threadIdx.y / 4; if (pxY < tgtSize && pxX < tgtSize) { const int pxIdx = pxY * tgtSize + pxX; imgs += channelIdx * imgPixels * imgStride + caseIdx; target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx; // This will cause slight distortions at the edges when upsampling in some cases. // But I think that's not a big deal. const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale)); const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale)); const float u = floorf(srcPxX + 1) - srcPxX; const float w = srcPxY - floorf(srcPxY); // Consider doing max(0, min(imgSize, x)) here const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left const int srcPx1 = srcPx0 + 1; // top-right const int srcPx2 = srcPx0 + imgSize; // bottom-left const int srcPx3 = srcPx2 + 1; // bottom-right #pragma unroll for (int c = 0; c < imgsPerThread; ++c) { if (!checkCaseBounds || caseIdx + c * 32 < numImages) { const float val0 = imgs[srcPx0 * imgStride + c * 32]; const float val1 = imgs[srcPx1 * imgStride + c * 32]; const float val2 = imgs[srcPx2 * imgStride + c * 32]; const float val3 = imgs[srcPx3 * imgStride + c * 32]; const float c0 = u * (val0 - val1) + val1; const float c1 = u * (val2 - val3) + val3; target[32 * c] = w * (c1 - c0) + c0; } } } } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. 
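 *
 * Note: the host wrapper convGaussianBlur below asserts 1 <= radius <= 4, so "3, 5, 7, 9"
 * is best read as the allowed filter widths 2*radius + 1.
 *
 * Minimal host-side usage sketch (illustrative only; it assumes a caller-provided scratch
 * tensor `tmp` shaped like `images`). Since each call blurs along a single axis via the
 * `horiz` flag, a full 2-D separable blur is two passes:
 *
 *   convGaussianBlur(images, filter, tmp,    true,  numChannels, 0, 1);  // horizontal pass
 *   convGaussianBlur(tmp,    filter, target, false, numChannels, 0, 1);  // vertical pass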
*/ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. 
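 *
 * Worked example (derived from the code below): with imgSize = 8, startX = 0 and
 * strideX = 2, the host wrapper computes outputsX = DIVUP(8, 2) = 4, and each output
 * (outputIdxX, outputIdxY) copies the single source pixel at
 * (startX + outputIdxX*strideX, startX + outputIdxY*strideX), i.e. rows/columns 0, 2, 4, 6
 * of the image (a strided "bed of nails" subsampling). With reverse = true the same mapping
 * scatters values back to those positions (the wrapper zero-fills the image first when
 * scaleTargets == 0).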
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(THCudaTensor* images, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? 
target->size[1] : images->size[1]; int imgPixels = imgSize * imgSize; THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(target)); THAssert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { THAssert(target->size[0] == numChannels * outputs); } else { THAssert(images->size[0] == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { THCudaTensor_resize2d(images, numChannels * imgPixels, numImages); THCudaTensor_fill(images, 0); } else { THCudaTensor_resize2d(target, numChannels*outputs, numImages); } } else { if (reverse) { THAssert(images->size[0] == numChannels * outputs); THAssert(images->size[1] == numImages); } else { THAssert(target->size[0] == numChannels * outputs); THAssert(target->size[1] == numImages); } } int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; int chansPerThread = numChannels % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (imgsPerThread == 4) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 2, 2, false><<<blocks, threads, 
0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } else { if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 1, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } } void convBedOfNails(THCudaTensor* images, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(THCudaTensor* actsGrad, THCudaTensor* target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(THCudaTensor* images, THCudaTensor* filter, THCudaTensor* target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images->size[1]; int radius = filter->size[1] / 2; int imgPixels = images->size[0] / numChannels; int imgSize = int(sqrt(imgPixels)); THAssert(imgPixels == imgSize * imgSize); THAssert(radius >= 1 && radius <= 4); THAssert(imgSize >= 2 * radius + 1); THAssert(filter->size[0] == 1); THAssert(images->size[0] == numChannels * imgPixels); THAssert(THCudaTensor_isContiguous(target)); if (scaleTargets == 0) { THCudaTensor_resizeAs(target, images); } else { THAssert(THCudaTensor_isSameSizeAs(target, images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 1><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 2><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target), imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { 
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 3><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target),
                                                        imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs);
    } else if (radius == 4) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 4><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(filter), THCudaTensor_data(target),
                                                        imgSize, numImages, images->stride[0], horiz, scaleTargets, scaleOutputs);
    }
}

/*
 * Block size 1x128
 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
 * blockIdx.y determines pixel.y
 *
 * So each block does one output for some number of images and all the filters.
 *
 * threadIdx.x determines img idx
 *
 * imgs:        (numFilters, imgPixels, numImages)
 * meanDiffs:   (numFilters, imgPixels, numImages)
 * denoms:      (numFilters, imgPixels, numImages) (out)
 * target:      (numFilters, imgPixels, numImages) (out)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by B_Y*filtersPerThread
 */
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                                 const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;

    const int pxIdx = pxIdxY * imgSize + pxIdxX;

    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;

    imgs += pxIdx * numImages + imgIdx;
    denoms += pxIdx * numImages + imgIdx;
    meanDiffs += imgIdx;
    target += pxIdx * numImages + imgIdx;

    float prod[numFilters][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 0;
            }
        }
    }
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);

    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
                    #pragma unroll
                    for (int f = 0; f < numFilters; f++) {
                        prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
                    }
                }
            }
        }
    }

    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = minDiv + addScale * prod[f][i];
                denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
                target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
            }
        }
    }
}

/*
 * Block size B_YxB_X
 * blockIdx.x determines image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines filter idx in batches of B_Y*filtersPerThread
 * blockIdx.z determines pixel
 *
 * So each block does one pixel for some number of images/filters.
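 *
 * In the kCNorm_* kernels the per-position arithmetic is: accumulate
 * prod = sum of meanDiffs^2 over a sizeX x sizeX spatial window (offset by -sizeX/2 and
 * clipped to the image), then write
 *   denoms = minDiv + addScale * prod
 *   target = imgs * denoms^(-powScale)
 * i.e. the denominator and output of a local contrast/response normalization.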
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) { const int imgPixels = imgSize * imgSize; const int pxIdxX = blockIdx.z % imgSize; const int pxIdxY = blockIdx.z / imgSize; const int blockImgIdx = blockIdx.x * B_X * imgsPerThread; const int blockFilterIdx = blockIdx.y * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = max(0, startPxY); const int loopStartX = max(0, startPxX); const int loopEndY = min(imgSize, startPxY + sizeX); const int loopEndX = min(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[f][i] = minDiv + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. 
* * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if 
(!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = minDiv + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y */ template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked> __global__ void kFCNorm(cudaTextureObject_t imgs, cudaTextureObject_t meanDiffs, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float addScale, const float powScale, const float minDiv) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; const int meanDiffsOffset = pxIdx * numImages + imgIdx; // imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; // meanDiffs += pxIdx * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = 0; } } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += square(tex1Dfetch<float>(meanDiffs, meanDiffsOffset + f * imgPixels * numImages + i * B_X)); } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] = minDiv + addScale * prod[i]; target[i * B_X] = tex1Dfetch<float>(imgs, imgOffset + i * B_X) * __powf(prod[i], -powScale); } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
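 *
 * The gradient rule implemented below: for every input filter, only the pooled outputs o
 * whose window [startF + o*stride, startF + o*stride + poolSize) covers that filter are
 * visited, and the gradient is taken only where the input equals the pooled maximum,
 *   prod += (imgs == maxActs) * maxGrads
 * i.e. the usual route-to-argmax rule for cross-map max pooling.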
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numOutputs, imgPixels, numImages) * maxActs: (numOutputs, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds> __global__ void kCrossMapMaxPoolUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int startF, const int poolSize, const int numOutputs, const int stride, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); // const int numOutputs = DIVUP(numFilters, stride); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; maxGrads += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx; maxActs += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx; target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; float prod[imgsPerThread]; // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) { // return; // } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } if (filterIdx < numFilters) { // const int startOut = max(0, (filterIdx-startF-poolSize)/ stride + 1); const int loopStartOut = max(0, (filterIdx-startF-poolSize)/ stride + 1); const int loopEndOut = min(numOutputs, (filterIdx - startF)/ stride + 1); for (int o = loopStartOut; o < loopEndOut; ++o) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float ma = maxActs[o * imgPixels * numImages + i * B_X]; const float mg = maxGrads[o * imgPixels * numImages + i * B_X]; const float img = imgs[i*B_X]; prod[i] += (img == ma) * mg; } } } // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF); if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } } /* * images: (numFilters, imgPixels, numImages) * maxGrads: (numOutputs, imgPixels, numImages) * maxActs: (numOutputs, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) */ void convCrossMapMaxPoolUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, const int imgSize, const int startF, const int poolSize, const int stride, const float scaleTargets, const float scaleOutputs) { int numImages = images->size[1]; int imgPixels = imgSize * imgSize; int numFilters = images->size[0] / imgPixels; int numOutputs = maxActs->size[0] / imgPixels; THAssert(images->size[0] == numFilters * imgPixels); THAssert(maxGrads->size[0] == numOutputs * imgPixels); THAssert(maxGrads->size[1] == numImages); THAssert(THCudaTensor_isSameSizeAs(maxGrads, 
maxActs)); THAssert(images->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(maxGrads)); THAssert(THCudaTensor_isContiguous(maxActs)); THAssert(THCudaTensor_isSameSizeAs(maxGrads, maxActs)); // THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(stride <= poolSize); THAssert(startF <= 0); THAssert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered dim3 threads(32, 4); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numFilters, threads.y)); bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0; if (scaleTargets == 0) { THCudaTensor_resizeAs(target, images); if (!checkCaseBounds) { if (imgsPerThread == 4) { kCrossMapMaxPoolUndo<4, 32, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else if (imgsPerThread == 2) { kCrossMapMaxPoolUndo<4, 32, 2, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else { kCrossMapMaxPoolUndo<4, 32, 1, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { kCrossMapMaxPoolUndo<4, 32, 1, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { THAssert(THCudaTensor_isSameSizeAs(target, images)); if (!checkCaseBounds) { if (imgsPerThread == 4) { kCrossMapMaxPoolUndo<4, 32, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else if (imgsPerThread == 2) { kCrossMapMaxPoolUndo<4, 32, 2, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } else { kCrossMapMaxPoolUndo<4, 32, 1, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } else { kCrossMapMaxPoolUndo<4, 32, 1, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, scaleTargets, scaleOutputs); } } getLastCudaError("convCrossMapMaxPoolUndo: kernel execution failed"); } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * 
So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked> __global__ void kFRNormUndo(cudaTextureObject_t outGrads, cudaTextureObject_t denoms, cudaTextureObject_t inputs, cudaTextureObject_t acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/B_Y; const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y; const int imgPixels = imgSize * imgSize; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int imgIdx = blockImgIdx + threadIdx.x; const int actsOffset = pxIdx * numImages + imgIdx; const int inputOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx; target += inputOffset; float prod[imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[i] = 0; } const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx; const int loopStartF = blocked ? startF : MAX(0, startF); const int loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { prod[i] += tex1Dfetch<float>(acts, actsOffset + f * imgPixels * numImages + i * B_X); } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X); const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X); prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X); const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X); prod[i] = inp * prod[i] + out * __powf(den, -powScale); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y * * So each block does one output pixel for some number of images/filters. 
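 *
 * Unlike kFRNormUndo above, this variant takes no stored `denoms` tensor; it recomputes the
 * denominator from `inputs` as
 *   denom = minDiv + addScale * sum_f inputs_f^2   (over the same cross-map window as the forward pass)
 * and then writes
 *   target = -2 * powScale * addScale * inputs * prod + outGrads * denom^(-powScale)
 * where prod accumulates outGrads * acts * (acts/inputs)^(1/powScale) over the corresponding
 * (reflected) window, i.e. the two terms of the response-normalization chain rule.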
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines filter idx
 *
 * outGrads:    (numFilters, imgPixels, numImages)
 * denoms:      (numFilters, imgPixels, numImages)
 * inputs:      (numFilters, imgPixels, numImages)
 * acts:        (numFilters, imgPixels, numImages)
 * target:      (numFilters, imgPixels, numImages)
 *
 * numImages must be divisible by B_X*imgsPerThread
 * numFilters must be divisible by B_Y
 *
 * TODO: this is pretty wasteful of computation. A lot of threads basically compute the same products.
 */
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
//__launch_bounds__(128,16)
__global__ void kFRNormUndo2(cudaTextureObject_t outGrads, cudaTextureObject_t inputs, cudaTextureObject_t acts, float* target,
                             const int imgSize, const int numFilters, const int numImages, const int sizeF,
                             const float addScale, const float powScale, const float minDiv,
                             const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/B_Y;

    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;

    const int imgPixels = imgSize * imgSize;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    const int imgIdx = blockImgIdx + threadIdx.x;

    const int inpOffset = pxIdx * numImages + imgIdx;
    const int outOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;

    target += outOffset;

    float prod[imgsPerThread];
    float denoms[imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        prod[i] = 0;
        denoms[i] = 0;
    }

    int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
    int loopStartF = blocked ? startF : MAX(0, startF);
    int loopEndF = MIN(numFilters, startF + sizeF);

    for (int f = loopStartF; f < loopEndF; ++f) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                // If an input is zero, then we shouldn't divide by it.
                const float grad = tex1Dfetch<float>(outGrads, inpOffset + f * imgPixels * numImages + i * B_X);
                const float act = tex1Dfetch<float>(acts, inpOffset + f * imgPixels * numImages + i * B_X);
                const float inp = tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X) + (act == 0);
                prod[i] += grad * act * __powf(__fdividef(act, inp), 1.0f/powScale);
            }
        }
    }

    startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
    loopStartF = blocked ?
startF : MAX(0, startF); loopEndF = MIN(numFilters, startF + sizeF); for (int f = loopStartF; f < loopEndF; ++f) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { denoms[i] += square(tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X)); } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X); denoms[i] = addScale * denoms[i] + minDiv; prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale)); target[i * B_X] = prod[i]; } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X); const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X); denoms[i] = addScale * denoms[i] + minDiv; prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale)); target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i]; } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { const float regionStartY = fmaxf(0, startX + my * strideX); const float regionEndY = fminf(imgSize, startX + my * strideX + subsX); const float regionSizeY = regionEndY - regionStartY; for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; const float regionStartX = fmaxf(0, startX + mx * strideX); const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX); const float regionSizeX = regionEndX - regionStartX; // It's important to do the division here, because pushing division into the below // loops makes the code 4x slower. const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
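 *
 * This is the backward pass of spatial max pooling: for every image pixel the kernel visits
 * each pooling window that contains it (the startOutputX/Y .. endOutputX/Y ranges below) and
 * accumulates
 *   prod += (imgs == maxActs) * maxGrads
 * so the gradient flows only to the input locations that produced the pooled maximum.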
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * maxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; 
i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, cudaTextureObject_t denoms, cudaTextureObject_t outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale * tex1Dfetch<float>(outGrads, i + k * B_X) * acts[i + k * B_X], tex1Dfetch<float>(denoms, i + k * B_X)); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || 
imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (scaleTargets == 0) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. 
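 *
 * This kernel (like kRNormUndo above) appears designed to consume `acts` already rescaled by
 * kRNormUndoPrelims (acts := -2 * scale * acts * outGrads / denoms): it sums those values
 * over each pixel's sizeX x sizeX neighborhood into prod and writes
 *   target = inputs * prod + outGrads * denoms^(-powScale)
 * i.e. the two terms of the local response normalization gradient.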
*/ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = 
inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(THCudaTensor* images, THCudaTensor* maxGrads, THCudaTensor* maxActs, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[1]; int numFilters = maxGrads->size[0] / outputs; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(maxGrads->size[0] == numFilters * outputs); THAssert(maxGrads->size[1] == numImages); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(maxGrads)); THAssert(THCudaTensor_isContiguous(maxActs)); THAssert(THCudaTensor_isSameSizeAs(maxGrads, maxActs)); THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(strideX <= subsX); THCudaTensor_resizeAs(target, images); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, 
numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(maxGrads), THCudaTensor_data(maxActs), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } getLastCudaError("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(THCudaTensor* avgGrads, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(THCudaTensor* avgGrads, THCudaTensor* target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads->size[1]; int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads->size[0] / outputs; THAssert(avgGrads->size[0] == numFilters * outputs); THAssert(THCudaTensor_isContiguous(avgGrads)); THAssert(numFilters % 16 == 0); // THAssert(numImages % 128 == 0); THAssert(strideX <= subsX); THCudaTensor_resize2d(target, numFilters * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; int checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads, 
0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(avgGrads), THCudaTensor_data(target), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } } getLastCudaError("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(THCudaTensor* images, THCudaTensor* denoms, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale, minDiv); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(THCudaTensor* images, THCudaTensor* meanDiffs, THCudaTensor* denoms, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) { int numImages = images->size[1]; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(THCudaTensor_isSameSizeAs(meanDiffs, images)); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(meanDiffs)); THAssert(numFilters % 16 == 0 || numFilters <= 8); THCudaTensor_resizeAs(target, images); THCudaTensor_resizeAs(denoms, images); THAssert(THCudaTensor_isContiguous(target)); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
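// Grid layout for this branch: each kCNorm2 block handles a 4x4 pixel tile for
// bx*imgsPerThread images and filtersPerThread filters, hence the
// DIVUP(imgSize,4)*DIVUP(numImages,bx*imgsPerThread) x DIVUP(imgSize,4)*numFilters/filtersPerThread
// grid computed below.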
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; THAssert((imgsPerThread * bx) % 32 == 0); THAssert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, 
cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 5) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 6) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 7) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } else if (numFilters == 8) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numImages, sizeX, addScale, powScale, minDiv); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,threads.x*4), (numFilters / (threads.y * 2)), imgPixels); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), 
THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv); } } } getLastCudaError("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(THCudaTensor* outGrads, THCudaTensor* denoms, THCudaTensor* meanDiffs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(THCudaTensor* outGrads, THCudaTensor* denoms, THCudaTensor* inputs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[1]; int imgPixels = outGrads->size[0] / numFilters; int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(outGrads->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isSameSizeAs(denoms, outGrads)); THAssert(THCudaTensor_isSameSizeAs(acts, denoms)); THAssert(THCudaTensor_isContiguous(outGrads)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, outGrads); THAssert(THCudaTensor_isContiguous(target)); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 8; dim3 threads(128); dim3 blocks(DIVUP(THCudaTensor_nElement(outGrads),(threads.x * prelimEltsPerThread))); bool checkPrelimBounds = THCudaTensor_nElement(outGrads) % (threads.x * prelimEltsPerThread) != 0; //printf("num elts: %d, blocks: %d\n", outGrads.getNumElements(), blocks.x); cudaTextureObject_t texDenoms = THCudaTensor_getTextureObject(denoms); cudaTextureObject_t texOutGrads = THCudaTensor_getTextureObject(outGrads); kRNormUndoPrelims<128, 8><<<blocks, threads, 0>>>(THCudaTensor_data(acts), texDenoms, texOutGrads, THCudaTensor_nElement(outGrads), -2*addScale*powScale); checkCudaErrors(cudaDestroyTextureObject(texDenoms)); checkCudaErrors(cudaDestroyTextureObject(texOutGrads)); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) // NOTE: this stuff is not optimized for Kepler. Only kRNormUndo is. int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
4 : 2; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; THAssert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (imgsPerThread == 8) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 4, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { 
cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 2, 4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } } else { int imgsPerThread = numImages % 128 == 0 ? 4 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 4, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 4, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, true><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 1, 2, false><<<blocks, threads, 0>>>(THCudaTensor_data(outGrads), THCudaTensor_data(denoms), THCudaTensor_data(inputs), THCudaTensor_data(acts), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } getLastCudaError("kRNormUndo: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) * * imgSize = scale * tgtSize */ void convResizeBilinear(THCudaTensor* images, THCudaTensor* target, int imgSize, int tgtSize, float scale) { int imgPixels = 
imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = images->size[0] / imgPixels; int numImages = images->size[1]; THAssert(images->size[0] == numChannels * imgPixels); THCudaTensor_resize2d(target, numChannels * tgtPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int numChunksX = DIVUP(tgtSize, 4); int numChunks = numChunksX * numChunksX; double imgCenter = imgSize * 0.5; double tgtCenter = tgtSize * 0.5; double centerScale = imgCenter - tgtCenter * scale; int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 16); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1); kResizeBilinear<4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1); kResizeBilinear<4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1); kResizeBilinear<2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1); kResizeBilinear<2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1); kResizeBilinear<1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } else { cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1); kResizeBilinear<1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgSize, tgtSize, numImages, images->stride[0], scale, centerScale); } } getLastCudaError("convResizeBilinear: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToYUV(THCudaTensor* images, THCudaTensor* target) { int imgPixels = images->size[0] / 3; int numImages = images->size[1]; THAssert(images->size[0] == 3 * imgPixels); THCudaTensor_resize2d(target, 3 * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1); kRGBToYUV<4, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1); kRGBToYUV<4, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1); kRGBToYUV<2, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1); kRGBToYUV<2, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1); kRGBToYUV<1, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1); kRGBToYUV<1, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } getLastCudaError("convRGBToYUV: kernel execution failed"); } /* * imgs: (3, imgPixels, numImages) with given imgStride * target: (3, imgPixels, numImages) */ void convRGBToLAB(THCudaTensor* images, THCudaTensor* target, bool center) { int imgPixels = images->size[0] / 3; int numImages = images->size[1]; THAssert(images->size[0] == 3 * imgPixels); THCudaTensor_resize2d(target, 3 * imgPixels, numImages); THAssert(THCudaTensor_isContiguous(target)); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 
2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4)); if (imgsPerThread == 4) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1); kRGBToLAB<4, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1); kRGBToLAB<4, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1); kRGBToLAB<4, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1); kRGBToLAB<4, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } else if (imgsPerThread == 2) { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1); kRGBToLAB<2, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1); kRGBToLAB<2, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1); kRGBToLAB<2, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1); kRGBToLAB<2, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } else { if (center) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1); kRGBToLAB<1, true, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1); kRGBToLAB<1, false, true><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1); kRGBToLAB<1, true, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } else { cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1); kRGBToLAB<1, false, false><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(target), imgPixels, numImages, images->stride[0]); } } } getLastCudaError("convRGBToLAB: kernel execution failed"); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * target: (numChannels, tgtPixels, numImages) */ void convCrop(THCudaTensor* imgs, THCudaTensor* target, int imgSize, int tgtSize, int startY, int startX) { int numImages = imgs->size[1]; int imgPixels = imgSize * imgSize; int tgtPixels = tgtSize * tgtSize; int numChannels = 
imgs->size[0] / imgPixels; THAssert(imgs->size[0] == imgPixels * numChannels); THAssert(imgPixels == imgSize * imgSize); THAssert(imgSize - startY >= tgtSize); THAssert(imgSize - startX >= tgtSize); THAssert(startY >= 0); THAssert(startX >= 0); THCudaTensor_resize2d(target, numChannels * tgtPixels, numImages); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkCaseBounds = numImages % (32*imgsPerThread) != 0; dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4)); dim3 threads(32, 4); if (imgsPerThread == 4) { if (checkCaseBounds) { kCrop<4, true><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { kCrop<4, false><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } else if (imgsPerThread == 2) { if (checkCaseBounds) { kCrop<2, true><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { kCrop<2, false><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } else { if (checkCaseBounds) { kCrop<1, true><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } else { kCrop<1, false><<<blocks, threads, 0>>>(THCudaTensor_data(imgs), THCudaTensor_data(target), numImages, imgs->stride[0], imgSize, tgtSize, startY, startX); } } getLastCudaError("convCrop: kernel execution failed"); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * Note: at present, I have no code to compute the meanDiffs. So it should be set * to be equal to images. In other words, this isn't really doing contrast normalization, * just response normalization. 
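 * For reference, kFCNorm (defined earlier in this file) computes, per output element,
 *   target = images * (minDiv + addScale * sum over sizeF neighboring filters of meanDiffs^2)^(-powScale).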
*/ void convContrastNormCrossMap(THCudaTensor* images, THCudaTensor* meanDiffs, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) { int numImages = images->size[1]; int imgPixels = images->size[0] / numFilters; THAssert(images->size[0] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(THCudaTensor_isSameSizeAs(meanDiffs, images)); THAssert(sizeF > 0 && sizeF <= numFilters); THAssert(THCudaTensor_isContiguous(images)); THAssert(THCudaTensor_isContiguous(meanDiffs)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, images); // THCudaTensor_resizeAs(denoms, images); THAssert(THCudaTensor_isContiguous(target)); bool checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); // printf("convContrastNormCrossMap imgs: %p, meanDiffs: %p, denoms: %p, target: %p, imgSize: %d, numFilters: %d, numImages: %d, sizeF: %d, addScale: %f, powScale: %f, minDiv: %f, blocked: %d\n", // THCudaTensor_data(images), THCudaTensor_data(meanDiffs), THCudaTensor_data(denoms), THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, blocked); cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texMeanDiffs = THCudaTensor_getTextureObject(meanDiffs); if (blocked) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, true><<<blocks, threads, 0>>>(texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, true><<<blocks, threads, 0>>>(texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, true, false><<<blocks, threads, 0>>>(texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } else { cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1); kFCNorm<4, 32, 4, false, false><<<blocks, threads, 0>>>(texImages, texMeanDiffs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv); } } checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texMeanDiffs)); getLastCudaError("convContrastNormCrossMap: kernel execution failed"); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. 
*/ void convResponseNormCrossMapUndo(THCudaTensor* outGrads, THCudaTensor* inputs, THCudaTensor* acts, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[1]; int imgPixels = outGrads->size[0] / numFilters; int imgSize = int(sqrt(imgPixels)); THAssert(imgSize * imgSize == imgPixels); THAssert(sizeF > 0 && sizeF <= numFilters); THAssert(outGrads->size[0] == numFilters * imgPixels); THAssert(THCudaTensor_isContiguous(outGrads)); THAssert(numFilters % 16 == 0); THCudaTensor_resizeAs(target, outGrads); THAssert(THCudaTensor_isContiguous(target)); dim3 threads2 = dim3(32, 4); dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize); bool checkCaseBounds = (numImages % 128) != 0; cudaTextureObject_t texOutGrads = THCudaTensor_getTextureObject(outGrads); cudaTextureObject_t texInputs = THCudaTensor_getTextureObject(inputs); cudaTextureObject_t texActs = THCudaTensor_getTextureObject(acts); if (blocked) { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, true>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, false, true, true><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, true>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, false, false, true><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, true>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, true, true, true><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, true>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, true, false, true><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } } else { if (scaleTargets == 0 && scaleOutput == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, false>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, false, true, false><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, false>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, false, false, false><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, false>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, true, true, false><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kFRNormUndo2<4, 
32, 4, true, false, false>, cudaFuncCachePreferL1); kFRNormUndo2<4, 32, 4, true, false, false><<<blocks2, threads2, 0>>>(texOutGrads, texInputs, texActs, THCudaTensor_data(target), imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, scaleTargets, scaleOutput); } } } checkCudaErrors(cudaDestroyTextureObject(texOutGrads)); checkCudaErrors(cudaDestroyTextureObject(texInputs)); checkCudaErrors(cudaDestroyTextureObject(texActs)); getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed"); } void convResponseNormCrossMap(THCudaTensor* images, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) { convContrastNormCrossMap(images, images, target, numFilters, sizeF, addScale, powScale, minDiv, blocked); } /* * images: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convResponseNormCrossMap(THCudaTensor* images, THCudaTensor* target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) { convContrastNormCrossMap(images, images, target, numFilters, sizeF, addScale, powScale, 1, blocked); } void convLocalMaxPool(THCudaTensor* images, THCudaTensor* target, int numFilters, int subsX, int startX, int strideX, int outputsX) { MaxPooler pooler; convLocalPool(images, target, numFilters, subsX, startX, strideX, outputsX, pooler); } void convLocalAvgPool(THCudaTensor* images, THCudaTensor* target, int numFilters, int subsX, int startX, int strideX, int outputsX) { AvgPooler pooler; convLocalPool(images, target, numFilters, subsX, startX, strideX, outputsX, pooler); } void convCrossMapMaxPool(THCudaTensor* images, THCudaTensor* target, const int startF, const int poolSize, const int numOutputs, const int stride, const int imgSize) { MaxPooler pooler; convPoolCrossMap(images, target, startF, poolSize, numOutputs, stride, imgSize, pooler); }
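Taken together, the wrappers above cover the forward and backward passes of a response-normalization layer. A minimal caller-side sketch, assuming THCudaTensor buffers already allocated in the (numFilters, imgPixels, numImages) layout used throughout; the tensor names below are placeholders and not part of the file above:

// Hypothetical usage of the wrappers above (all tensor names are placeholders).
// Forward pass: fills denoms and acts from images.
convResponseNorm(images, denoms, acts, numFilters, sizeX, addScale, powScale, 1.0f /* minDiv */);
// Backward pass: writes gradInput from gradOutput; as documented above, this overwrites acts.
convResponseNormUndo(gradOutput, denoms, images, acts, gradInput,
                     numFilters, sizeX, addScale, powScale,
                     0.0f /* scaleTargets */, 1.0f /* scaleOutput */);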
7012bb49d28cda451e768337efe43c49f8144968.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if (error != hipSuccess)\ {\ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\ fprintf(stderr, "code: %d, reason: %s\n", error,\ hipGetErrorString(error));\ exit(EXIT_FAILURE);\ }\ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void readPnm(char * fileName, int &numChannels, int &width, int &height, uint8_t * &pixels) { FILE * f = fopen(fileName, "r"); if (f == NULL) { printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } char type[3]; fscanf(f, "%s", type); if (strcmp(type, "P2") == 0) numChannels = 1; else if (strcmp(type, "P3") == 0) numChannels = 3; else // In this exercise, we don't touch other types { fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } fscanf(f, "%i", &width); fscanf(f, "%i", &height); int max_val; fscanf(f, "%i", &max_val); if (max_val > 255) // In this exercise, we assume 1 byte per value { fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } pixels = (uint8_t *)malloc(width * height * numChannels); for (int i = 0; i < width * height * numChannels; i++) fscanf(f, "%hhu", &pixels[i]); fclose(f); } void writePnm(uint8_t * pixels, int numChannels, int width, int height, char * fileName) { FILE * f = fopen(fileName, "w"); if (f == NULL) { printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } if (numChannels == 1) fprintf(f, "P2\n"); else if (numChannels == 3) fprintf(f, "P3\n"); else { fclose(f); printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "%i\n%i\n255\n", width, height); for (int i = 0; i < width * height * numChannels; i++) fprintf(f, "%hhu\n", pixels[i]); fclose(f); } __global__ void convertRgb2GrayKernel(uint8_t * inPixels, int width, int height, uint8_t * outPixels) { // TODO int iy = blockDim.y * blockIdx.y + threadIdx.y; int ix = blockDim.x * blockIdx.x + threadIdx.x; if (ix < width && iy <height) { int i = iy* width +ix; outPixels[i] = inPixels[3*i]*0.299 + inPixels[3*i +1]*0.587 + inPixels[3*i +2]*0.114; } } void convertRgb2Gray(uint8_t * inPixels, int width, int height, uint8_t * outPixels, bool useDevice=false, dim3 blockSize=dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { for (int r = 0; r < height; r++) { for (int c = 0; c < width; c++) { int i = r * width + c; uint8_t r = inPixels[i * 3]; uint8_t g = inPixels[i * 3 + 1]; uint8_t b = inPixels[i * 3 + 2]; outPixels[i] = 0.299f * r + 0.587f * g + 0.114f * b; } } } else // use device { hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); printf("GPU name: %s\n", devProp.name); printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor); // TODO: Allocate device memories uint8_t *d_inpixels, *d_outpixels; CHECK(hipMalloc(&d_inpixels, width * height * 3 * sizeof(uint8_t))); CHECK(hipMalloc(&d_outpixels, width * height * sizeof(uint8_t))); // TODO: Copy data to device memories CHECK(hipMemcpy(d_inpixels,inPixels,width * height*3 * sizeof(uint8_t),hipMemcpyHostToDevice)); // TODO: Set grid size and call kernel (remember to check kernel error) dim3 
gridSize((width-1)/blockSize.x + 1,(height-1)/blockSize.y + 1); hipLaunchKernelGGL(( convertRgb2GrayKernel), dim3(gridSize),dim3(blockSize), 0, 0, d_inpixels,width,height,d_outpixels); hipDeviceSynchronize(); CHECK(hipGetLastError()); // TODO: Copy result from device memories CHECK(hipMemcpy(outPixels,d_outpixels, width*height*sizeof(uint8_t),hipMemcpyDeviceToHost)); // TODO: Free device memories hipFree(d_outpixels); hipFree(d_inpixels); // for (int i = 0; i < width*height*3; i++) // printf("%d\t",d_inpixels[i]); } timer.Stop(); float time = timer.Elapsed(); printf("Processing time (%s): %f ms\n\n", useDevice == true? "use device" : "use host", time); } float computeError(uint8_t * a1, uint8_t * a2, int n) { float err = 0; for (int i = 0; i < n; i++) err += abs((int)a1[i] - (int)a2[i]); err /= n; return err; } char * concatStr(const char * s1, const char * s2) { char * result = (char *)malloc(strlen(s1) + strlen(s2) + 1); strcpy(result, s1); strcat(result, s2); return result; } int main(int argc, char ** argv) { if (argc != 3 && argc != 5) { printf("The number of arguments is invalid\n"); return EXIT_FAILURE; } // Read input RGB image file int numChannels, width, height; uint8_t * inPixels; readPnm(argv[1], numChannels, width, height, inPixels); if (numChannels != 3) return EXIT_FAILURE; // Input image must be RGB printf("Image size (width x height): %i x %i\n\n", width, height); // Convert RGB to grayscale not using device uint8_t * correctOutPixels= (uint8_t *)malloc(width * height); convertRgb2Gray(inPixels, width, height, correctOutPixels); // Convert RGB to grayscale using device uint8_t * outPixels= (uint8_t *)malloc(width * height); dim3 blockSize(32, 32); // Default if (argc == 5) { blockSize.x = atoi(argv[3]); blockSize.y = atoi(argv[4]); } convertRgb2Gray(inPixels, width, height, outPixels, true, blockSize); // Compute mean absolute error between host result and device result float err = computeError(outPixels, correctOutPixels, width * height); printf("Error between device result and host result: %f\n", err); // Write results to files char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension writePnm(correctOutPixels, 1, width, height, concatStr(outFileNameBase, "_host.pnm")); writePnm(outPixels, 1, width, height, concatStr(outFileNameBase, "_device.pnm")); // Free memories free(inPixels); free(outPixels); }
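As a worked example of the launch geometry in convertRgb2Gray: for a hypothetical 1000x750 image with the default 32x32 block, gridSize = ((1000-1)/32 + 1, (750-1)/32 + 1) = (32, 24), i.e. 768 blocks of 1024 threads covering a 1024x768 thread grid; the in-kernel check `if (ix < width && iy < height)` discards the 24 extra columns and 18 extra rows.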
7012bb49d28cda451e768337efe43c49f8144968.cu
#include <stdio.h> #include <stdint.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\ fprintf(stderr, "code: %d, reason: %s\n", error,\ cudaGetErrorString(error));\ exit(EXIT_FAILURE);\ }\ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void readPnm(char * fileName, int &numChannels, int &width, int &height, uint8_t * &pixels) { FILE * f = fopen(fileName, "r"); if (f == NULL) { printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } char type[3]; fscanf(f, "%s", type); if (strcmp(type, "P2") == 0) numChannels = 1; else if (strcmp(type, "P3") == 0) numChannels = 3; else // In this exercise, we don't touch other types { fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } fscanf(f, "%i", &width); fscanf(f, "%i", &height); int max_val; fscanf(f, "%i", &max_val); if (max_val > 255) // In this exercise, we assume 1 byte per value { fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } pixels = (uint8_t *)malloc(width * height * numChannels); for (int i = 0; i < width * height * numChannels; i++) fscanf(f, "%hhu", &pixels[i]); fclose(f); } void writePnm(uint8_t * pixels, int numChannels, int width, int height, char * fileName) { FILE * f = fopen(fileName, "w"); if (f == NULL) { printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } if (numChannels == 1) fprintf(f, "P2\n"); else if (numChannels == 3) fprintf(f, "P3\n"); else { fclose(f); printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "%i\n%i\n255\n", width, height); for (int i = 0; i < width * height * numChannels; i++) fprintf(f, "%hhu\n", pixels[i]); fclose(f); } __global__ void convertRgb2GrayKernel(uint8_t * inPixels, int width, int height, uint8_t * outPixels) { // TODO int iy = blockDim.y * blockIdx.y + threadIdx.y; int ix = blockDim.x * blockIdx.x + threadIdx.x; if (ix < width && iy <height) { int i = iy* width +ix; outPixels[i] = inPixels[3*i]*0.299 + inPixels[3*i +1]*0.587 + inPixels[3*i +2]*0.114; } } void convertRgb2Gray(uint8_t * inPixels, int width, int height, uint8_t * outPixels, bool useDevice=false, dim3 blockSize=dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { for (int r = 0; r < height; r++) { for (int c = 0; c < width; c++) { int i = r * width + c; uint8_t r = inPixels[i * 3]; uint8_t g = inPixels[i * 3 + 1]; uint8_t b = inPixels[i * 3 + 2]; outPixels[i] = 0.299f * r + 0.587f * g + 0.114f * b; } } } else // use device { cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); printf("GPU name: %s\n", devProp.name); printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor); // TODO: Allocate device memories uint8_t *d_inpixels, *d_outpixels; CHECK(cudaMalloc(&d_inpixels, width * height * 3 * sizeof(uint8_t))); CHECK(cudaMalloc(&d_outpixels, width * height * sizeof(uint8_t))); // TODO: Copy data to device memories CHECK(cudaMemcpy(d_inpixels,inPixels,width * height*3 * sizeof(uint8_t),cudaMemcpyHostToDevice)); // TODO: Set grid size and call kernel (remember to check kernel error) dim3 gridSize((width-1)/blockSize.x + 1,(height-1)/blockSize.y + 1); 
convertRgb2GrayKernel<<<gridSize,blockSize>>>(d_inpixels,width,height,d_outpixels); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // TODO: Copy result from device memories CHECK(cudaMemcpy(outPixels,d_outpixels, width*height*sizeof(uint8_t),cudaMemcpyDeviceToHost)); // TODO: Free device memories cudaFree(d_outpixels); cudaFree(d_inpixels); // for (int i = 0; i < width*height*3; i++) // printf("%d\t",d_inpixels[i]); } timer.Stop(); float time = timer.Elapsed(); printf("Processing time (%s): %f ms\n\n", useDevice == true? "use device" : "use host", time); } float computeError(uint8_t * a1, uint8_t * a2, int n) { float err = 0; for (int i = 0; i < n; i++) err += abs((int)a1[i] - (int)a2[i]); err /= n; return err; } char * concatStr(const char * s1, const char * s2) { char * result = (char *)malloc(strlen(s1) + strlen(s2) + 1); strcpy(result, s1); strcat(result, s2); return result; } int main(int argc, char ** argv) { if (argc != 3 && argc != 5) { printf("The number of arguments is invalid\n"); return EXIT_FAILURE; } // Read input RGB image file int numChannels, width, height; uint8_t * inPixels; readPnm(argv[1], numChannels, width, height, inPixels); if (numChannels != 3) return EXIT_FAILURE; // Input image must be RGB printf("Image size (width x height): %i x %i\n\n", width, height); // Convert RGB to grayscale not using device uint8_t * correctOutPixels= (uint8_t *)malloc(width * height); convertRgb2Gray(inPixels, width, height, correctOutPixels); // Convert RGB to grayscale using device uint8_t * outPixels= (uint8_t *)malloc(width * height); dim3 blockSize(32, 32); // Default if (argc == 5) { blockSize.x = atoi(argv[3]); blockSize.y = atoi(argv[4]); } convertRgb2Gray(inPixels, width, height, outPixels, true, blockSize); // Compute mean absolute error between host result and device result float err = computeError(outPixels, correctOutPixels, width * height); printf("Error between device result and host result: %f\n", err); // Write results to files char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension writePnm(correctOutPixels, 1, width, height, concatStr(outFileNameBase, "_host.pnm")); writePnm(outPixels, 1, width, height, concatStr(outFileNameBase, "_device.pnm")); // Free memories free(inPixels); free(outPixels); }
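Note on the launch configuration shared by the two copies above: the grid is sized with a ceiling division, (width-1)/blockSize.x + 1, so the padded grid covers images whose dimensions are not multiples of the block size, and the kernel's bounds check discards the extra threads. A minimal self-contained sketch of that idiom (illustrative names, not part of either archived file):

#include <stdio.h>
#include <stdint.h>

__global__ void fillKernel(uint8_t * out, int width, int height)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix < width && iy < height)            // guard: drop the threads of the padded grid
        out[iy * width + ix] = 255;
}

int main()
{
    int width = 1000, height = 700;           // deliberately not multiples of 32
    uint8_t * d_out;
    cudaMalloc(&d_out, width * height);
    dim3 blockSize(32, 32);
    dim3 gridSize((width - 1) / blockSize.x + 1,    // ceil(width / blockSize.x)
                  (height - 1) / blockSize.y + 1);  // ceil(height / blockSize.y)
    fillKernel<<<gridSize, blockSize>>>(d_out, width, height);
    cudaDeviceSynchronize();
    printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_out);
    return 0;
}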
9d018e28139dc6606a173ab1a5cd52c7e30aaa18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _tt_line_project_ray_gpu_kernels.cu * * NiftyRec * Stefano Pedemonte, May 2012. * CMIC - Centre for Medical Image Computing * UCL - University College London. * Released under BSD licence, see LICENSE.txt */ #ifndef _TTPROJECTRAY_KERNEL_CU_ #define _TTPROJECTRAY_KERNEL_CU_ //#include <cutil_inline.h> #include "_reg_blocksize_gpu.h" #include <hip/hip_vector_types.h> #include <vector_functions.h> #include <driver_functions.h> #include <cutil_math.h> #include <_tt_common.h> #define MAX_STEPS 1000000000 hipArray *d_volumeArray = 0; //texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture texture<VolumeType, 3, hipReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_tt_line_project_ray_gpu(float *d_output, float3 sourcePosition, float3 volumeSize, uint imageWidthPixels, uint imageHeightPixels, float tStep, int interpolation) { const uint image_width_pixels = imageWidthPixels; const uint image_height_pixels = imageHeightPixels; const float3 volume_size = volumeSize; const float3 source_position = sourcePosition; const float tstep = tStep; const int maxSteps = MAX_STEPS; //const int maxSteps = MAX_STEPS; //(volume_size.x^2+volume_size.y^2+volume_size.z^2)^0.5f/tStep; //diagonal of the bounding box const float3 boxMin = make_float3(0.0f, 0.0f, 0.0f); const float3 boxMax = make_float3(volume_size.x, volume_size.y, volume_size.z); const float3 rec_volume_size = 1.0f/volume_size; //x and y index detector pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= image_width_pixels) || (y >= image_height_pixels)) return; //u and v are in normalized detector pixel [0,0]->[1,1] float u = (x / (float) image_width_pixels); float v = (y / (float) image_height_pixels); // calculate eye ray in world space Ray eyeRay; eyeRay.o = source_position; //transform and normalize direction vector eyeRay.d = normalize(make_float3(mul(c_invViewMatrix, make_float4(u,v,0.0f,1.0f)))-eyeRay.o); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, 
&tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating float sum; float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates //float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); float sample = tex3D(tex, pos.x*rec_volume_size.x, pos.y*rec_volume_size.y, pos.z*rec_volume_size.z); sum = sum + sample; t += tstep; if (t > tfar) break; pos += step; } d_output[y*image_width_pixels + x] = sum; } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; } extern "C" void initCuda(void *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); CUDA_SAFE_CALL( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; CUDA_SAFE_CALL( hipMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture CUDA_SAFE_CALL(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); } extern "C" void freeCudaBuffers() { CUDA_SAFE_CALL(hipFreeArray(d_volumeArray)); } extern "C" void tt_line_project_ray_gpu(dim3 gridSize, dim3 blockSize, float *d_output, float3 source_position, float3 volume_size, uint imageW, uint imageH, float t_step, int interpolation) { hipLaunchKernelGGL(( d_tt_line_project_ray_gpu), dim3(gridSize), dim3(blockSize), 0, 0, d_output, source_position, volume_size, imageW, imageH, t_step, interpolation); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { /* fprintf(stderr,"\nMatrix:"); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[0],invViewMatrix[1],invViewMatrix[3],invViewMatrix[3]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[4],invViewMatrix[5],invViewMatrix[6],invViewMatrix[7]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[8],invViewMatrix[9],invViewMatrix[10],invViewMatrix[11]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[12],invViewMatrix[13],invViewMatrix[14],invViewMatrix[15]);*/ CUDA_SAFE_CALL( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _TTPROJECTRAY_KERNEL_CU_
9d018e28139dc6606a173ab1a5cd52c7e30aaa18.cu
/* * _tt_line_project_ray_gpu_kernels.cu * * NiftyRec * Stefano Pedemonte, May 2012. * CMIC - Centre for Medical Image Computing * UCL - University College London. * Released under BSD licence, see LICENSE.txt */ #ifndef _TTPROJECTRAY_KERNEL_CU_ #define _TTPROJECTRAY_KERNEL_CU_ //#include <cutil_inline.h> #include "_reg_blocksize_gpu.h" #include <vector_types.h> #include <vector_functions.h> #include <driver_functions.h> #include <cutil_math.h> #include <_tt_common.h> #define MAX_STEPS 1000000000 cudaArray *d_volumeArray = 0; //texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture texture<VolumeType, 3, cudaReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_tt_line_project_ray_gpu(float *d_output, float3 sourcePosition, float3 volumeSize, uint imageWidthPixels, uint imageHeightPixels, float tStep, int interpolation) { const uint image_width_pixels = imageWidthPixels; const uint image_height_pixels = imageHeightPixels; const float3 volume_size = volumeSize; const float3 source_position = sourcePosition; const float tstep = tStep; const int maxSteps = MAX_STEPS; //const int maxSteps = MAX_STEPS; //(volume_size.x^2+volume_size.y^2+volume_size.z^2)^0.5f/tStep; //diagonal of the bounding box const float3 boxMin = make_float3(0.0f, 0.0f, 0.0f); const float3 boxMax = make_float3(volume_size.x, volume_size.y, volume_size.z); const float3 rec_volume_size = 1.0f/volume_size; //x and y index detector pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= image_width_pixels) || (y >= image_height_pixels)) return; //u and v are in normalized detector pixel [0,0]->[1,1] float u = (x / (float) image_width_pixels); float v = (y / (float) image_height_pixels); // calculate eye ray in world space Ray eyeRay; eyeRay.o = source_position; //transform and normalize direction vector eyeRay.d = normalize(make_float3(mul(c_invViewMatrix, make_float4(u,v,0.0f,1.0f)))-eyeRay.o); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along 
ray from front to back, accumulating float sum; float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates //float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); float sample = tex3D(tex, pos.x*rec_volume_size.x, pos.y*rec_volume_size.y, pos.z*rec_volume_size.z); sum = sum + sample; t += tstep; if (t > tfar) break; pos += step; } d_output[y*image_width_pixels + x] = sum; } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); CUDA_SAFE_CALL( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; CUDA_SAFE_CALL( cudaMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture CUDA_SAFE_CALL(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); } extern "C" void freeCudaBuffers() { CUDA_SAFE_CALL(cudaFreeArray(d_volumeArray)); } extern "C" void tt_line_project_ray_gpu(dim3 gridSize, dim3 blockSize, float *d_output, float3 source_position, float3 volume_size, uint imageW, uint imageH, float t_step, int interpolation) { d_tt_line_project_ray_gpu<<<gridSize, blockSize>>>( d_output, source_position, volume_size, imageW, imageH, t_step, interpolation); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { /* fprintf(stderr,"\nMatrix:"); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[0],invViewMatrix[1],invViewMatrix[3],invViewMatrix[3]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[4],invViewMatrix[5],invViewMatrix[6],invViewMatrix[7]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[8],invViewMatrix[9],invViewMatrix[10],invViewMatrix[11]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[12],invViewMatrix[13],invViewMatrix[14],invViewMatrix[15]);*/ CUDA_SAFE_CALL( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _TTPROJECTRAY_KERNEL_CU_
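The intersectBox device function in both copies above is the classic slab test referenced by its URL comment: intersect the ray with the three pairs of axis-aligned planes, keep the largest per-axis entry value and the smallest exit value, and report a hit when the exit lies beyond the entry. A host-side sketch of the same test, handy for checking the kernel's geometry, is given below (illustrative only, not part of the archived files). When reusing the marching loop from the kernel, note that its accumulator sum is declared without an initializer and should start at zero.

#include <cstdio>
#include <algorithm>

struct Vec3 { float x, y, z; };

// Slab test: per-axis entry/exit parameters, then interval intersection.
bool intersectBox(Vec3 o, Vec3 d, Vec3 bmin, Vec3 bmax, float &tnear, float &tfar)
{
    float tx0 = (bmin.x - o.x) / d.x, tx1 = (bmax.x - o.x) / d.x;
    float ty0 = (bmin.y - o.y) / d.y, ty1 = (bmax.y - o.y) / d.y;
    float tz0 = (bmin.z - o.z) / d.z, tz1 = (bmax.z - o.z) / d.z;
    tnear = std::max(std::max(std::min(tx0, tx1), std::min(ty0, ty1)), std::min(tz0, tz1));
    tfar  = std::min(std::min(std::max(tx0, tx1), std::max(ty0, ty1)), std::max(tz0, tz1));
    return tfar > tnear;
}

int main()
{
    Vec3 origin = {-1.0f, 0.5f, 0.5f}, dir = {1.0f, 0.0f, 0.0f};
    Vec3 boxMin = {0.0f, 0.0f, 0.0f}, boxMax = {1.0f, 1.0f, 1.0f};
    float tnear, tfar;
    if (intersectBox(origin, dir, boxMin, boxMax, tnear, tfar))
        printf("hit: tnear = %f, tfar = %f\n", tnear, tfar);   // expects 1 and 2
    return 0;
}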
c87aedb4ef2cbd5ea5e5a4273400a3070454547d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* compile the program as: nvcc -arch sm_75 hello.cu -o hello (the number after sm_ varies with the GPU architecture; 75 corresponds to Turing) */ #include <stdio.h> __global__ void helloFromGPU() { if(threadIdx.x == 5) printf("Hello World from GPU !\n"); } int main() { printf("Hello World from CPU !\n"); hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, ); hipDeviceReset(); // hipDeviceSynchronize(); return 0; }
c87aedb4ef2cbd5ea5e5a4273400a3070454547d.cu
/* compile the program as: nvcc -arch sm_75 hello.cu -o hello (the number after sm_ varies with the GPU architecture; 75 corresponds to Turing) */ #include <stdio.h> __global__ void helloFromGPU() { if(threadIdx.x == 5) printf("Hello World from GPU !\n"); } int main() { printf("Hello World from CPU !\n"); helloFromGPU <<<1, 10>>>(); cudaDeviceReset(); // cudaDeviceSynchronize(); return 0; }
950c349d34df2dcb920a897f9466ff2123767344.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMode.cu" #else void THCTensor_(calculateMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position) { THAssert(THCTensor_(isContiguous)(state, input)); // Because the input is contiguous, we want to get a reference to the // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset scalar_t *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i); } int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1); THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data); thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement); thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement); // Fill sortBuffer with [0, 1, 2, ... nElement - 1] thrust::sequence( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif seq.begin(), seq.end()); // Sort the input data. The original indices of the data are stored in seq thrust::sort_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), seq.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfLess() #endif ); // Count # of unique elements via an inner product between adjacent elements. // Add 1 if two neighboring element are not equal. 
int unique = 1 + thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(), #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else thrust::not_equal_to<scalar_t>() #endif ); // Count frequency of each element thrust::device_vector<scalar_t> keys(unique); thrust::device_vector<int> counts(unique); thrust::reduce_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), thrust::constant_iterator<int>(1), keys.begin(), counts.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfEqualTo() #endif ); // Find index of maximum count thrust::device_vector<int>::iterator it = thrust::max_element( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif counts.begin(), counts.end()); scalar_t mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else thrust::device_vector<scalar_t>::iterator positionIter = thrust::find( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), mode); #endif THAssert(positionIter != iter.end()); int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()]; // Place mode, index in output ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values); int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices); for (int i = 0; i < THLongStorage_size(position); ++i) { int64_t pos = THLongStorage_data(position)[i]; valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos; indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos; } THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode); THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index); } // this probably could be a loop, not a recursive algorithm void THCTensor_(dimApplyMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position, int curDim) { int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input); // Because we have transposed the Tensor, the data for the dimension we are mode'ing along // is always in the innermost dimension if (curDim == ndim - 1) { THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position); } else { // Loop through the values and recurse for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) { THLongStorage_data(position)[curDim] = i; THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1); } } } #define MAX_GRID_SIZE 65535 #define MAX_BLOCK_SIZE 1024 void THCTensor_(mode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, int dimension, int keepdim) { THCTensor *transposed, *contiguous, *valuesTransposed; THLongStorage *position; THCudaLongStorage *sortBuffer; THCudaLongTensor *indicesTransposed; int64_t ndim, sliceSize, slices; 
THAssert(THCTensor_(checkGPU)(state, 1, values)); // Verify they are asking for a valid dimension ndim = THCTensor_(nDimensionLegacyAll)(state, input); THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds"); sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension); slices = THCTensor_(nElement)(state, input) / sliceSize; // Resize output value, index Tensors to appropriate sizes (i.e. the same as // the input Tensor, except at dim=dimension, the size is 1) THCTensor_preserveReduceDimSemantics( state, values, ndim, dimension, keepdim); THCTensor_preserveReduceDimSemantics( state, indices, ndim, dimension, keepdim); std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input); dim[dimension] = 1; THCTensor_(resize)(state, values, dim, {}); THCudaLongTensor_resize(state, indices, dim, {}); // If sliceSize is 1, copy input to values and set indices if (sliceSize == 1) { THCTensor_(copy)(state, values, input); THCudaLongTensor_fill(state, indices, TH_INDEX_BASE); if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } return; } // Requirements for fused kernel implementation: // // 1. sliceSize <= 2 * max threads per block // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for // a kernel launch // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed) if (sliceSize <= MAX_BLOCK_SIZE && slices <= MAX_GRID_SIZE && THCTensor_canUse32BitIndexMath(state, input)) { // Beginning our optimized implementation. First thing we want to do is to transpose // the input Tensor along the sort dimension, and then make it contiguous transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed); TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. Each block // is responsible for computing a single mode dim3 grid; THC_getGridFromTiles(slices, grid); // The blocksize is two elements per thread, rounded up to the nearest power of 2 int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize); // Macro that calls kernel --> note that we set the block dimensions here, and // the amount of shared memory #define HANDLE_MODE(SIZE) \ { \ dim3 blockSize(SIZE / 2); \ \ int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ hipLaunchKernelGGL(( computeMode<scalar_t, SIZE>) \ , dim3(grid), dim3(blockSize), memsize, THCState_getCurrentStream(state), \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } // Tradeoff between compilation time and the number of specializations. 
Ideally we would have // one HANDLE_MODE for each power of 2 switch(ceilPowerOf2) { case 2048: HANDLE_MODE(2048) break; case 1024: case 512: case 256: HANDLE_MODE(1024) break; case 128: case 64: HANDLE_MODE(128) break; case 32: case 16: case 8: case 4: case 2: HANDLE_MODE(32) break; case 1: default: assert(false); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, transposed); THCTensor_(free)(state, contiguous); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); } else { // Beginning our naive implementation: We don't want to mutate the input Tensor, but // we need to be able to sort the inputs along the dimension in order to calculate the // mode. Additionally, its ideal if the data along the dimension is contiguous. So // we transpose the dimension with the innermost dimension and make a new contiguous // version that we can use. transposed = THCTensor_(newClone)(state, input); THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); THCTensor_(free)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1); // Position is a Storage that will store the dimension values we are processing position = THLongStorage_newWithSize(ndim - 1); // Sort Buffer is a Storage that will be used in the internal sort required to calculate // the mode efficiently sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize); // Call mode THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0); THCTensor_(free)(state, contiguous); THLongStorage_free(position); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); THCudaLongStorage_free(state, sortBuffer); } if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } } #undef MAX_GRID_SIZE #undef MAX_BLOCK_SIZE #endif
950c349d34df2dcb920a897f9466ff2123767344.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMode.cu" #else void THCTensor_(calculateMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position) { THAssert(THCTensor_(isContiguous)(state, input)); // Because the input is contiguous, we want to get a reference to the // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset scalar_t *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i); } int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1); THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data); thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement); thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement); // Fill sortBuffer with [0, 1, 2, ... nElement - 1] thrust::sequence( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif seq.begin(), seq.end()); // Sort the input data. The original indices of the data are stored in seq thrust::sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), seq.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfLess() #endif ); // Count # of unique elements via an inner product between adjacent elements. // Add 1 if two neighboring element are not equal. 
int unique = 1 + thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(), #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else thrust::not_equal_to<scalar_t>() #endif ); // Count frequency of each element thrust::device_vector<scalar_t> keys(unique); thrust::device_vector<int> counts(unique); thrust::reduce_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), thrust::constant_iterator<int>(1), keys.begin(), counts.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfEqualTo() #endif ); // Find index of maximum count thrust::device_vector<int>::iterator it = thrust::max_element( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif counts.begin(), counts.end()); scalar_t mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else thrust::device_vector<scalar_t>::iterator positionIter = thrust::find( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), mode); #endif THAssert(positionIter != iter.end()); int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()]; // Place mode, index in output ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values); int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices); for (int i = 0; i < THLongStorage_size(position); ++i) { int64_t pos = THLongStorage_data(position)[i]; valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos; indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos; } THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode); THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index); } // this probably could be a loop, not a recursive algorithm void THCTensor_(dimApplyMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position, int curDim) { int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input); // Because we have transposed the Tensor, the data for the dimension we are mode'ing along // is always in the innermost dimension if (curDim == ndim - 1) { THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position); } else { // Loop through the values and recurse for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) { THLongStorage_data(position)[curDim] = i; THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1); } } } #define MAX_GRID_SIZE 65535 #define MAX_BLOCK_SIZE 1024 void THCTensor_(mode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, int dimension, int keepdim) { THCTensor *transposed, *contiguous, *valuesTransposed; THLongStorage *position; THCudaLongStorage *sortBuffer; THCudaLongTensor *indicesTransposed; int64_t ndim, sliceSize, slices; 
THAssert(THCTensor_(checkGPU)(state, 1, values)); // Verify they are asking for a valid dimension ndim = THCTensor_(nDimensionLegacyAll)(state, input); THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds"); sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension); slices = THCTensor_(nElement)(state, input) / sliceSize; // Resize output value, index Tensors to appropriate sizes (i.e. the same as // the input Tensor, except at dim=dimension, the size is 1) THCTensor_preserveReduceDimSemantics( state, values, ndim, dimension, keepdim); THCTensor_preserveReduceDimSemantics( state, indices, ndim, dimension, keepdim); std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input); dim[dimension] = 1; THCTensor_(resize)(state, values, dim, {}); THCudaLongTensor_resize(state, indices, dim, {}); // If sliceSize is 1, copy input to values and set indices if (sliceSize == 1) { THCTensor_(copy)(state, values, input); THCudaLongTensor_fill(state, indices, TH_INDEX_BASE); if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } return; } // Requirements for fused kernel implementation: // // 1. sliceSize <= 2 * max threads per block // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for // a kernel launch // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed) if (sliceSize <= MAX_BLOCK_SIZE && slices <= MAX_GRID_SIZE && THCTensor_canUse32BitIndexMath(state, input)) { // Beginning our optimized implementation. First thing we want to do is to transpose // the input Tensor along the sort dimension, and then make it contiguous transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed); TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. Each block // is responsible for computing a single mode dim3 grid; THC_getGridFromTiles(slices, grid); // The blocksize is two elements per thread, rounded up to the nearest power of 2 int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize); // Macro that calls kernel --> note that we set the block dimensions here, and // the amount of shared memory #define HANDLE_MODE(SIZE) \ { \ dim3 blockSize(SIZE / 2); \ \ int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ computeMode<scalar_t, SIZE> \ <<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } // Tradeoff between compilation time and the number of specializations. 
Ideally we would have // one HANDLE_MODE for each power of 2 switch(ceilPowerOf2) { case 2048: HANDLE_MODE(2048) break; case 1024: case 512: case 256: HANDLE_MODE(1024) break; case 128: case 64: HANDLE_MODE(128) break; case 32: case 16: case 8: case 4: case 2: HANDLE_MODE(32) break; case 1: default: assert(false); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, transposed); THCTensor_(free)(state, contiguous); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); } else { // Beginning our naive implementation: We don't want to mutate the input Tensor, but // we need to be able to sort the inputs along the dimension in order to calculate the // mode. Additionally, its ideal if the data along the dimension is contiguous. So // we transpose the dimension with the innermost dimension and make a new contiguous // version that we can use. transposed = THCTensor_(newClone)(state, input); THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); THCTensor_(free)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1); // Position is a Storage that will store the dimension values we are processing position = THLongStorage_newWithSize(ndim - 1); // Sort Buffer is a Storage that will be used in the internal sort required to calculate // the mode efficiently sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize); // Call mode THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0); THCTensor_(free)(state, contiguous); THLongStorage_free(position); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); THCudaLongStorage_free(state, sortBuffer); } if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } } #undef MAX_GRID_SIZE #undef MAX_BLOCK_SIZE #endif
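calculateMode above chains several Thrust primitives: sort the slice, count the distinct values with an inner product over adjacent pairs, collapse equal runs with reduce_by_key, then take the key whose count is maximal and find its first position. A stripped-down, self-contained sketch of that pipeline on a small device vector (it omits the half-precision functors, the custom allocator and the stream plumbing of the original):

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/find.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <cstdio>

int main()
{
    float h[] = {3, 1, 2, 3, 3, 1};
    thrust::device_vector<float> v(h, h + 6);

    thrust::sort(v.begin(), v.end());
    // Number of distinct values: 1 + number of adjacent unequal pairs.
    int unique = 1 + thrust::inner_product(v.begin(), v.end() - 1, v.begin() + 1, 0,
                                           thrust::plus<int>(), thrust::not_equal_to<float>());
    // One (value, count) pair per run of equal elements.
    thrust::device_vector<float> keys(unique);
    thrust::device_vector<int> counts(unique);
    thrust::reduce_by_key(v.begin(), v.end(), thrust::constant_iterator<int>(1),
                          keys.begin(), counts.begin());
    // The mode is the key with the largest count; its first position gives the index.
    thrust::device_vector<int>::iterator maxIt = thrust::max_element(counts.begin(), counts.end());
    float mode = keys[maxIt - counts.begin()];
    long firstPos = thrust::find(v.begin(), v.end(), mode) - v.begin();
    printf("mode = %g, count = %d, first sorted position = %ld\n", mode, (int)*maxIt, firstPos);
    return 0;
}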
541b919a2c6d8f9b9800e36803486901c3d6a80e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> // Print device properties void printDevProp(hipDeviceProp_t devProp) { //printf("Major revision number: %d\n", devProp.major); //printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); //printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); //printf("Texture alignment: %u\n", devProp.textureAlignment); //printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); } }
541b919a2c6d8f9b9800e36803486901c3d6a80e.cu
#include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { //printf("Major revision number: %d\n", devProp.major); //printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); //printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); //printf("Texture alignment: %u\n", devProp.textureAlignment); //printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); } }
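Both copies of printDevProp above print the size_t-typed fields (totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem) with %u, which narrows them on 64-bit hosts. A short sketch of the same query using %zu, or scaling to MiB, assuming device 0 exists:

#include <stdio.h>

int main()
{
    cudaDeviceProp p;
    cudaGetDeviceProperties(&p, 0);
    printf("Name: %s\n", p.name);
    printf("Total global memory: %zu bytes (%.1f MiB)\n",
           p.totalGlobalMem, p.totalGlobalMem / (1024.0 * 1024.0));
    printf("Shared memory per block: %zu bytes\n", p.sharedMemPerBlock);
    printf("Maximum memory pitch: %zu bytes\n", p.memPitch);
    printf("Total constant memory: %zu bytes\n", p.totalConstMem);
    return 0;
}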
6ad031d91f3e7a2e2ac374908e9d4d67bf2913a9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void sayHello() { printf("Basic Info: BlockId = %d, ThreadId in block = %d\n", blockIdx.x, threadIdx.x); printf("Hello World from the GPU [unique thread no. = %d]\n", blockIdx.x * blockDim.x + threadIdx.x); } int main() { hipLaunchKernelGGL(( sayHello), dim3(3), dim3(2), 0, 0, ); printf("Hello World from the CPU\n"); hipDeviceSynchronize(); return EXIT_SUCCESS; }
6ad031d91f3e7a2e2ac374908e9d4d67bf2913a9.cu
#include <stdio.h> #include <cuda.h> __global__ void sayHello() { printf("Basic Info: BlockId = %d, ThreadId in block = %d\n", blockIdx.x, threadIdx.x); printf("Hello World from the GPU [unique thread no. = %d]\n", blockIdx.x * blockDim.x + threadIdx.x); } int main() { sayHello<<<3, 2>>>(); printf("Hello World from the CPU\n"); cudaDeviceSynchronize(); return EXIT_SUCCESS; }
c95a16a56a2bfad6522b03dcd38113c2f1ebf1f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #pragma warning(push, 0) #include <stdio.h> #include <stdlib.h> #include <iostream> #include <math.h> #include <thrust\device_ptr.h> #include <thrust\sequence.h> #include <device_launch_parameters.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #pragma warning(pop) #include "parallelGpuNode2.cuh" #include "Node.h" #include "RandomGame.h" #include "CUDA_def.h" __global__ void PlayRandomGameGPU2(int n, int* results, char* boards, short boardSize, char* playersToMoveNext, char player) { int i = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x; if (i >= n) return; char* cBoard = boards + i * boardSize * boardSize / 2 * sizeof(char); char playerToMoveNext = playersToMoveNext[i]; //SHARED MEMORY extern __shared__ char s[]; size_t offset = (blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x); char* sharedBoard = s + offset; for (size_t i = 0; i < boardSize * boardSize / 2; i++) // sets single board in one shared memory bank to minimalize confilcts { sharedBoard[i * BANKSCOUNT] = cBoard[i]; } Board board = Board(sharedBoard, boardSize); //~~SHARED MEMORY hiprandState_t state; hiprand_init(0, i, 1, &state); char result; PlayRandomGame(&board, playerToMoveNext, &result, [&]__device__(int minIn, int maxEx) { return hiprand(&state) % maxEx + minIn; }); results[i] = GetResultPoints(result, player); } void CallPlayRandomGameGPU2(dim3 numBlocks, dim3 threadsPerBlock, int n, int* results, char* boards, short boardSize, char* playersToMoveNext, char player) { size_t sharedMemorySize = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z * boardSize * boardSize / 2 * sizeof(char); PlayRandomGameGPU2 << <numBlocks, threadsPerBlock, sharedMemorySize >> > (n, results, boards, boardSize, playersToMoveNext, player); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); }
c95a16a56a2bfad6522b03dcd38113c2f1ebf1f6.cu
#pragma once #pragma warning(push, 0) #include <stdio.h> #include <stdlib.h> #include <iostream> #include <math.h> #include <thrust\device_ptr.h> #include <thrust\sequence.h> #include <device_launch_parameters.h> #include <curand.h> #include <curand_kernel.h> #pragma warning(pop) #include "parallelGpuNode2.cuh" #include "Node.h" #include "RandomGame.h" #include "CUDA_def.h" __global__ void PlayRandomGameGPU2(int n, int* results, char* boards, short boardSize, char* playersToMoveNext, char player) { int i = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x; if (i >= n) return; char* cBoard = boards + i * boardSize * boardSize / 2 * sizeof(char); char playerToMoveNext = playersToMoveNext[i]; //SHARED MEMORY extern __shared__ char s[]; size_t offset = (blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x); char* sharedBoard = s + offset; for (size_t i = 0; i < boardSize * boardSize / 2; i++) // sets single board in one shared memory bank to minimalize confilcts { sharedBoard[i * BANKSCOUNT] = cBoard[i]; } Board board = Board(sharedBoard, boardSize); //~~SHARED MEMORY curandState_t state; curand_init(0, i, 1, &state); char result; PlayRandomGame(&board, playerToMoveNext, &result, [&]__device__(int minIn, int maxEx) { return curand(&state) % maxEx + minIn; }); results[i] = GetResultPoints(result, player); } void CallPlayRandomGameGPU2(dim3 numBlocks, dim3 threadsPerBlock, int n, int* results, char* boards, short boardSize, char* playersToMoveNext, char player) { size_t sharedMemorySize = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z * boardSize * boardSize / 2 * sizeof(char); PlayRandomGameGPU2 << <numBlocks, threadsPerBlock, sharedMemorySize >> > (n, results, boards, boardSize, playersToMoveNext, player); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); }
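PlayRandomGameGPU2 above gives each thread its own generator: curand_init (hiprand_init in the HIP copy) is called with the flattened thread id as the sequence number, and moves are then drawn with curand(&state) % maxEx. A self-contained sketch of that per-thread RNG pattern, without the game logic (illustrative names only):

#include <cstdio>
#include <curand_kernel.h>

__global__ void drawMoves(unsigned long long seed, int n, int maxEx, int *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    curandState state;
    curand_init(seed, i, 0, &state);   // same seed, per-thread sequence -> independent streams
    out[i] = curand(&state) % maxEx;   // e.g. a random move index in [0, maxEx)
}

int main()
{
    const int n = 8;
    int *d_out, h_out[n];
    cudaMalloc(&d_out, n * sizeof(int));
    drawMoves<<<1, n>>>(1234ULL, n, 6, d_out);
    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%d ", h_out[i]);
    printf("\n");
    cudaFree(d_out);
    return 0;
}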
ee682b698724b038e190fcabc0172c96764ab418.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include <iostream> #include <algorithm> #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define MAX_SAVES 1000 #define QTD_ROOMS 3 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } struct point{ int x; int y; }; struct rect{ point pt; int alt; int lar; }; struct uint32Pt{ uint32_t *CPU; uint32_t *GPU; }; struct uint8Pt{ uint8_t *CPU; uint8_t *GPU; }; struct int16Pt{ int16_t *CPU; int16_t *GPU; }; typedef struct point point; typedef struct rect rect; typedef struct uint32Pt uint32Pt; typedef struct uint8Pt uint8Pt; typedef struct int16Pt int16Pt; //---GPU CODE---// //fact is used to build the current permutation //n is the index of the permutation to generate (the n-th permutation) __global__ void GenSeq(uint32_t *fact, uint8_t *rooms, int16_t *result, const uint32_t n, const uint32_t offset, const uint32_t maxMYgtid){ int8_t i, j; uint8_t perm[QTD_ROOMS]; uint32_t ThrPerBlk = blockDim.x; uint32_t MYbid = blockIdx.x; uint32_t MYtid = threadIdx.x; uint32_t MYgtid = ThrPerBlk * MYbid + MYtid + offset; //overflow possible? if(MYgtid >= maxMYgtid) return; //first part: compute the permutation //compute the factorial-base (Lehmer) code i = n; for (j = 0; j < QTD_ROOMS; ++j){ perm[j] = i / fact[QTD_ROOMS - 1 - j]; i = i % fact[QTD_ROOMS - 1 - j]; } // readjust values to obtain the permutation // start from the end and check if preceding values are lower for (i = QTD_ROOMS - 1; i > 0; --i) for (j = i - 1; j >= 0; --j) if (perm[j] <= perm[i]) perm[i]++; // uint8_t p1 = 0, p2 = 0, p3 = 0, p4 = 0; // uint8_t p_p1 = 0, p_p2 = 0, p_p3 = 0, p_p4 = 0; uint8_t curr_room = 0; uint32_t curr_result = 0; uint8_t p0 = 0; uint8_t p1 = 0; uint8_t alt = 0, lar = 0, prev_lar = 0, prev_alt = 0; int16_t prev_x = 0, prev_y = 0; // int16_t temp_result[4*QTD_ROOMS]; prev_alt = rooms[curr_room++]; prev_lar = rooms[curr_room++]; if(MYgtid & 1){ prev_alt ^= prev_lar; prev_lar ^= prev_alt; prev_alt ^= prev_lar; } // curr_result = 0; prev_x = 0; prev_y = 0; curr_result = n*maxMYgtid + MYgtid*4*QTD_ROOMS; result[curr_result++] = prev_x; result[curr_result++] = prev_y; result[curr_result++] = prev_alt; result[curr_result++] = prev_lar; MYgtid = MYgtid >> 1; for(i = 1; i < QTD_ROOMS; i++){ alt = rooms[curr_room++]; lar = rooms[curr_room++]; //if the room is rotated, swap height (alt) and width (lar) if(MYgtid & 1){ alt ^= lar; lar ^= alt; alt ^= lar; } p0 = (MYgtid >> 1) & 3; p1 = (MYgtid >> 3) & 3; prev_x = prev_x + (p0 & 1)*prev_lar - (p1 & 1)*lar; prev_y = prev_y + (p0 >> 1)*prev_alt - (p1 >> 1)*lar; prev_lar = lar; prev_alt = alt; result[curr_result++] = prev_x; result[curr_result++] = prev_y; result[curr_result++] = alt; result[curr_result++] = lar; MYgtid = MYgtid >> 5; } // uint32_t currIndex = 4*QTD_ROOMS*n; // for(i = 0; i < 4*QTD_ROOMS; i++){ // result[currIndex++] = temp_result[i]; // } // if(n == 0) // curr_result = n*maxMYgtid + debug*4*QTD_ROOMS; // // curr_result = 0; // printf("%4d - %2d %2d %2d %2d - %2d %2d %2d %2d - %2d %2d %2d %2d\n", debug, result[curr_result++], result[curr_result++], result[curr_result++],
result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++]); // printf("%d - %d %d %d %d\n", MYgtid, perm[0], perm[1], perm[2], perm[3]); } //---CPU CODE---// int *roomsSeq; //computes the factorial value __host__ int factorial(int x){ if(x <= 1) return 1; return x*factorial(x-1); } //checks that at least one GPU is available //initializes the timing variables (GPUtimes) __host__ void initCuda(hipEvent_t **GPUtimes, int qtdTimes){ int i = 0; int NumGPUs = 0; hipGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ std::cout << "\nNo CUDA Device is available\n"; exit(EXIT_FAILURE); } hipEvent_t *tempTimes = (hipEvent_t*)malloc(sizeof(hipEvent_t) * qtdTimes); for(i = 0; i < qtdTimes; i++){ hipEventCreate(&(tempTimes[i])); } *GPUtimes = tempTimes; } //calls hipEventDestroy on every entry of GPUtimes //calls hipDeviceReset to stop using the GPU __host__ void endCuda(hipEvent_t **GPUtimes, int qtdTimes){ int i = 0; for(i = 0; i < qtdTimes; i++){ hipEventDestroy((*GPUtimes)[i]); } *GPUtimes = nullptr; hipError_t cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess){ std::cout << "hipDeviceReset failed!"; exit(EXIT_FAILURE); } } __host__ void setupRooms(uint8Pt *rooms){ uint8_t alturas[] = {60, 30, 15, 10, 10, 10, 10, 10}; uint8_t larguras[] = {40, 20, 10, 05, 05, 05, 05, 05}; rooms->CPU = (uint8_t*)calloc(QTD_ROOMS*2, sizeof(uint8_t)); int i = 0; for(i = 0; i < QTD_ROOMS; i++){ rooms->CPU[i*2] = alturas[i]; rooms->CPU[i*2 + 1] = larguras[i]; } gpuErrchk(hipMalloc((void **)&(rooms->GPU), QTD_ROOMS * 2 * sizeof(uint32_t))); gpuErrchk(hipMemcpy((rooms->GPU), rooms->CPU, QTD_ROOMS * 2 * sizeof(uint32_t), hipMemcpyHostToDevice)); } int main(){ hipEvent_t *GPUtimes; uint32_t ThrPerBlk = 64, NumBlocks = 0; int i = 0, j = 0, k = 0; int qtdTimes = 3; int32_t permutations = factorial(QTD_ROOMS); uint32_t roomsAtPerm = pow(2,QTD_ROOMS) * pow(4, (QTD_ROOMS - 1)*2); uint32_t possibilities = permutations * roomsAtPerm; uint32_t resultSize = possibilities * 4; initCuda(&GPUtimes, qtdTimes); //compute the number of blocks NumBlocks = (roomsAtPerm + ThrPerBlk - 1) / ThrPerBlk; std::cout << "NumBlocks: " << NumBlocks << ", ThrPerBlk: " << ThrPerBlk << ", Permutations: " << permutations << ", roomsAtPerm: " << roomsAtPerm << ", possibilities: " << possibilities << ", resultSize: " << resultSize << std::endl; // debug if(QTD_ROOMS > 6){ std::cout << "There is not enough ram" << std::endl; return EXIT_FAILURE; } uint8Pt rooms; setupRooms(&rooms); //compute factorial numbers uint32Pt fact; fact.CPU = (uint32_t*)calloc(QTD_ROOMS, sizeof(uint32_t)); fact.CPU[0] = 1; for (i = 1; i < QTD_ROOMS; i++) fact.CPU[i] = fact.CPU[i - 1] * i; gpuErrchk(hipMalloc((void **)&(fact.GPU), QTD_ROOMS * sizeof(uint32_t))); gpuErrchk(hipMemcpy((fact.GPU), fact.CPU, QTD_ROOMS * sizeof(uint32_t), hipMemcpyHostToDevice)); int16Pt result; result.CPU = (int16_t*)calloc(resultSize, sizeof(int16_t)); gpuErrchk(hipMalloc((void **)&(result.GPU), resultSize*sizeof(int16_t))); // void GenSeq(uint32_t *fact, uint8_t *rooms, int16_t *result, const uint32_t n, const uint32_t offset, const uint32_t maxMYgtid) for(i = 0; i < 1; i++){ hipLaunchKernelGGL(( GenSeq) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, fact.GPU, rooms.GPU, result.GPU, i, 0, roomsAtPerm); } gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(result.CPU, result.GPU, resultSize*sizeof(int16_t),
hipMemcpyDeviceToHost)); gpuErrchk(hipDeviceSynchronize()); std::cout << "\n\nCPU\n\n"; for(i = 0; i < 1; i++){ for(j = 0; j <roomsAtPerm; j++){ std::cout << i*roomsAtPerm + j << " -\t"; for(k = 0; k < 4*QTD_ROOMS; k++){ if(k % 4 == 0) std::cout << "\t"; std::cout << result.CPU[i*roomsAtPerm + j*4*QTD_ROOMS + k] << " "; } std::cout << std::endl; // std::cout << result.CPU[i*roomsAtPerm + j] << " "; // if(j % 4 == 0) // std::cout << " "; // if(j % (4*QTD_ROOMS) == 0) // std::cout << "\n" << (i*roomsAtPerm + j)/(4*QTD_ROOMS) << " - "; } } free(fact.CPU); free(rooms.CPU); free(result.CPU); hipFree(fact.GPU); hipFree(rooms.GPU); hipFree(result.GPU); endCuda(&GPUtimes, qtdTimes); return(EXIT_SUCCESS); }
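A note on the encoding used by the GenSeq kernel above: each thread decodes its layout choices directly from MYgtid. The first room consumes one rotation bit, and every later room consumes one rotation bit plus two 2-bit fields (p0 and p1) before the 5-bit shift. The short, illustrative check below (not part of the dataset) shows that this per-thread bit budget matches the host-side roomsAtPerm = 2^QTD_ROOMS * 4^((QTD_ROOMS-1)*2).

// Illustrative consistency check (not in the source files):
// bits read per thread = 1 (first room's rotation)
//                      + 5*(QTD_ROOMS-1) (rotation + p0 + p1 for each later room)
//                      = 5*QTD_ROOMS - 4,
// which equals log2(2^QTD_ROOMS * 4^(2*(QTD_ROOMS-1))) = log2(roomsAtPerm).
#define QTD_ROOMS 3
static_assert((1u << QTD_ROOMS) * (1u << (4 * (QTD_ROOMS - 1)))
                  == (1u << (1 + 5 * (QTD_ROOMS - 1))),
              "GenSeq per-thread bit budget matches roomsAtPerm");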
ee682b698724b038e190fcabc0172c96764ab418.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cuda.h> #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include <iostream> #include <algorithm> #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define MAX_SAVES 1000 #define QTD_ROOMS 3 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } struct point{ int x; int y; }; struct rect{ point pt; int alt; int lar; }; struct uint32Pt{ uint32_t *CPU; uint32_t *GPU; }; struct uint8Pt{ uint8_t *CPU; uint8_t *GPU; }; struct int16Pt{ int16_t *CPU; int16_t *GPU; }; typedef struct point point; typedef struct rect rect; typedef struct uint32Pt uint32Pt; typedef struct uint8Pt uint8Pt; typedef struct int16Pt int16Pt; //---GPU CODE---// //fact est utilisé pour former la permutation actuelle //n est la nTh permutation __global__ void GenSeq(uint32_t *fact, uint8_t *rooms, int16_t *result, const uint32_t n, const uint32_t offset, const uint32_t maxMYgtid){ int8_t i, j; uint8_t perm[QTD_ROOMS]; uint32_t ThrPerBlk = blockDim.x; uint32_t MYbid = blockIdx.x; uint32_t MYtid = threadIdx.x; uint32_t MYgtid = ThrPerBlk * MYbid + MYtid + offset; //overflow possible? if(MYgtid >= maxMYgtid) return; //premier partie de calculer la permutation //calcule le factoriel code i = n; for (j = 0; j < QTD_ROOMS; ++j){ perm[j] = i / fact[QTD_ROOMS - 1 - j]; i = i % fact[QTD_ROOMS - 1 - j]; } // readjust values to obtain the permutation // start from the end and check if preceding values are lower for (i = QTD_ROOMS - 1; i > 0; --i) for (j = i - 1; j >= 0; --j) if (perm[j] <= perm[i]) perm[i]++; // uint8_t p1 = 0, p2 = 0, p3 = 0, p4 = 0; // uint8_t p_p1 = 0, p_p2 = 0, p_p3 = 0, p_p4 = 0; uint8_t curr_room = 0; uint32_t curr_result = 0; uint8_t p0 = 0; uint8_t p1 = 0; uint8_t alt = 0, lar = 0, prev_lar = 0, prev_alt = 0; int16_t prev_x = 0, prev_y = 0; // int16_t temp_result[4*QTD_ROOMS]; prev_alt = rooms[curr_room++]; prev_lar = rooms[curr_room++]; if(MYgtid & 1){ prev_alt ^= prev_lar; prev_lar ^= prev_alt; prev_alt ^= prev_lar; } // curr_result = 0; prev_x = 0; prev_y = 0; curr_result = n*maxMYgtid + MYgtid*4*QTD_ROOMS; result[curr_result++] = prev_x; result[curr_result++] = prev_y; result[curr_result++] = prev_alt; result[curr_result++] = prev_lar; MYgtid = MYgtid >> 1; for(i = 1; i < QTD_ROOMS; i++){ alt = rooms[curr_room++]; lar = rooms[curr_room++]; //si est rotacione, swap altura avec largura if(MYgtid & 1){ alt ^= lar; lar ^= alt; alt ^= lar; } p0 = (MYgtid >> 1) & 3; p1 = (MYgtid >> 3) & 3; prev_x = prev_x + (p0 & 1)*prev_lar - (p1 & 1)*lar; prev_y = prev_y + (p0 >> 1)*prev_alt - (p1 >> 1)*lar; prev_lar = lar; prev_alt = alt; result[curr_result++] = prev_x; result[curr_result++] = prev_y; result[curr_result++] = alt; result[curr_result++] = lar; MYgtid = MYgtid >> 5; } // uint32_t currIndex = 4*QTD_ROOMS*n; // for(i = 0; i < 4*QTD_ROOMS; i++){ // result[currIndex++] = temp_result[i]; // } // if(n == 0) // curr_result = n*maxMYgtid + debug*4*QTD_ROOMS; // // curr_result = 0; // printf("%4d - %2d %2d %2d %2d - %2d %2d %2d %2d - %2d %2d %2d %2d\n", debug, result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], 
result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++], result[curr_result++]); // printf("%d - %d %d %d %d\n", MYgtid, perm[0], perm[1], perm[2], perm[3]); } //---CPU CODE---// int *roomsSeq; //calcule le valeur de factorial __host__ int factorial(int x){ if(x <= 1) return 1; return x*factorial(x-1); } //vérifie si au moins une GPU est disponible //initialise les variables de temps (GPUtimes) __host__ void initCuda(cudaEvent_t **GPUtimes, int qtdTimes){ int i = 0; int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ std::cout << "\nNo CUDA Device is available\n"; exit(EXIT_FAILURE); } cudaEvent_t *tempTimes = (cudaEvent_t*)malloc(sizeof(cudaEvent_t) * qtdTimes); for(i = 0; i < qtdTimes; i++){ cudaEventCreate(&(tempTimes[i])); } *GPUtimes = tempTimes; } //appelle la funcion cudaEventDestroy pour tout les GPUTimes //appelle la funcion cudaDeviceReset pour arreter de utilizer la GPU __host__ void endCuda(cudaEvent_t **GPUtimes, int qtdTimes){ int i = 0; for(i = 0; i < qtdTimes; i++){ cudaEventDestroy((*GPUtimes)[i]); } *GPUtimes = nullptr; cudaError_t cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess){ std::cout << "cudaDeviceReset failed!"; exit(EXIT_FAILURE); } } __host__ void setupRooms(uint8Pt *rooms){ uint8_t alturas[] = {60, 30, 15, 10, 10, 10, 10, 10}; uint8_t larguras[] = {40, 20, 10, 05, 05, 05, 05, 05}; rooms->CPU = (uint8_t*)calloc(QTD_ROOMS*2, sizeof(uint8_t)); int i = 0; for(i = 0; i < QTD_ROOMS; i++){ rooms->CPU[i*2] = alturas[i]; rooms->CPU[i*2 + 1] = larguras[i]; } gpuErrchk(cudaMalloc((void **)&(rooms->GPU), QTD_ROOMS * 2 * sizeof(uint32_t))); gpuErrchk(cudaMemcpy((rooms->GPU), rooms->CPU, QTD_ROOMS * 2 * sizeof(uint32_t), cudaMemcpyHostToDevice)); } int main(){ cudaEvent_t *GPUtimes; uint32_t ThrPerBlk = 64, NumBlocks = 0; int i = 0, j = 0, k = 0; int qtdTimes = 3; int32_t permutations = factorial(QTD_ROOMS); uint32_t roomsAtPerm = pow(2,QTD_ROOMS) * pow(4, (QTD_ROOMS - 1)*2); uint32_t possibilities = permutations * roomsAtPerm; uint32_t resultSize = possibilities * 4; initCuda(&GPUtimes, qtdTimes); //compute la quantite de blocks NumBlocks = (roomsAtPerm + ThrPerBlk - 1) / ThrPerBlk; std::cout << "NumBlocks: " << NumBlocks << ", ThrPerBlk: " << ThrPerBlk << ", Permutations: " << permutations << ", roomsAtPerm: " << roomsAtPerm << ", possibilities: " << possibilities << ", resultSize: " << resultSize << std::endl; // debug if(QTD_ROOMS > 6){ std::cout << "There is not enough ram" << std::endl; return EXIT_FAILURE; } uint8Pt rooms; setupRooms(&rooms); //compute factorial numbers uint32Pt fact; fact.CPU = (uint32_t*)calloc(QTD_ROOMS, sizeof(uint32_t)); fact.CPU[i] = 1; while (i++ < QTD_ROOMS) fact.CPU[i] = fact.CPU[i - 1] * i; gpuErrchk(cudaMalloc((void **)&(fact.GPU), QTD_ROOMS * sizeof(uint32_t))); gpuErrchk(cudaMemcpy((fact.GPU), fact.CPU, QTD_ROOMS * sizeof(uint32_t), cudaMemcpyHostToDevice)); int16Pt result; result.CPU = (int16_t*)calloc(resultSize, sizeof(int16_t)); gpuErrchk(cudaMalloc((void **)&(result.GPU), resultSize*sizeof(int16_t))); // void GenSeq(uint32_t *fact, uint8_t *rooms, int16_t *result, const uint32_t n, const uint32_t offset, const uint32_t maxMYgtid) for(i = 0; i < 1; i++){ GenSeq <<< NumBlocks, ThrPerBlk >>> (fact.GPU, rooms.GPU, result.GPU, i, 0, roomsAtPerm); } gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(result.CPU, result.GPU, resultSize*sizeof(int16_t), cudaMemcpyDeviceToHost)); gpuErrchk(cudaDeviceSynchronize()); std::cout << "\n\nCPU\n\n"; for(i = 0; 
i < 1; i++){ for(j = 0; j <roomsAtPerm; j++){ std::cout << i*roomsAtPerm + j << " -\t"; for(k = 0; k < 4*QTD_ROOMS; k++){ if(k % 4 == 0) std::cout << "\t"; std::cout << result.CPU[i*roomsAtPerm + j*4*QTD_ROOMS + k] << " "; } std::cout << std::endl; // std::cout << result.CPU[i*roomsAtPerm + j] << " "; // if(j % 4 == 0) // std::cout << " "; // if(j % (4*QTD_ROOMS) == 0) // std::cout << "\n" << (i*roomsAtPerm + j)/(4*QTD_ROOMS) << " - "; } } free(fact.CPU); free(rooms.CPU); free(result.CPU); cudaFree(fact.GPU); cudaFree(rooms.GPU); cudaFree(result.GPU); endCuda(&GPUtimes, qtdTimes); return(EXIT_SUCCESS); }
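The permutation decode in the two files above is the factorial number system (Lehmer code): divide n by successive factorials to get digits, then bump each later digit past the earlier digits that are less than or equal to it. A minimal host-side sketch follows (the standalone main and the printout are illustrative, not from the source); it enumerates the same QTD_ROOMS! orderings that the kernel indexes by n.

#include <cstdint>
#include <cstdio>

#define QTD_ROOMS 3

int main() {
    // fact[j] = j!, the same table the kernel receives through *fact.
    uint32_t fact[QTD_ROOMS];
    fact[0] = 1;
    for (int j = 1; j < QTD_ROOMS; ++j) fact[j] = fact[j - 1] * j;
    uint32_t permutations = fact[QTD_ROOMS - 1] * QTD_ROOMS; // QTD_ROOMS!

    for (uint32_t n = 0; n < permutations; ++n) {
        uint8_t perm[QTD_ROOMS];
        uint32_t r = n;
        // Pass 1: factorial-base digits of n.
        for (int j = 0; j < QTD_ROOMS; ++j) {
            perm[j] = r / fact[QTD_ROOMS - 1 - j];
            r %= fact[QTD_ROOMS - 1 - j];
        }
        // Pass 2: bump later digits past earlier digits that are <= them.
        for (int i = QTD_ROOMS - 1; i > 0; --i)
            for (int j = i - 1; j >= 0; --j)
                if (perm[j] <= perm[i]) perm[i]++;
        printf("%u -> %d %d %d\n", n, perm[0], perm[1], perm[2]);
    }
    return 0;
}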
1cfa9e9e8353d041a859ecbf8d45eee7d328c5af.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

hipError_t cuDeviceTotalMem(size_t* bytes, hipDevice_t dev) {
    printf("%s\n", "I just want to tell you that cuDeviceTotalMem is hijacked!");
    *bytes = 1;
    return hipSuccess;
}
1cfa9e9e8353d041a859ecbf8d45eee7d328c5af.cu
#include <cuda.h>
#include <stdio.h>

CUresult cuDeviceTotalMem(size_t* bytes, CUdevice dev) {
    printf("%s\n", "I just want to tell you that cuDeviceTotalMem is hijacked!");
    *bytes = 1;
    return CUDA_SUCCESS;
}
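The pair of files above is a stub that re-defines cuDeviceTotalMem, so a process that resolves the symbol against this object instead of the real driver library gets the fake answer. A minimal caller is sketched below for illustration; how the stub is injected (link order, LD_PRELOAD, etc.) is not specified in the source, so treat the build setup as an assumption.

#include <cuda.h>
#include <cstdio>

int main() {
    // Ordinary driver-API query; if cuDeviceTotalMem resolves to the stub
    // above rather than to libcuda, this prints the "hijacked" message and
    // reports a total of 1 byte.
    if (cuInit(0) != CUDA_SUCCESS) return 1;
    CUdevice dev;
    if (cuDeviceGet(&dev, 0) != CUDA_SUCCESS) return 1;
    size_t bytes = 0;
    cuDeviceTotalMem(&bytes, dev);
    printf("total device memory reported: %zu bytes\n", bytes);
    return 0;
}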
fef96799ff0a37577e2c13897572d65905ae0d50.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "omp_repair.h"
#include <hip/hip_runtime.h>

static long num_steps = 1000000; // 1 million steps
double step;

#if __CUDA_ARCH__ < 600
__device__ double atomicAdd2(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif

__global__ void cal_pi(long num_steps, double step, double *sum)
{
    int i;
    double x;
    double local;
    local = 0.0;
    for (i = 1; i <= num_steps; i++) {
        x = (i - 0.5) * step;
        local += 4.0 / (1.0 + x * x);
    }
    atomicAdd2(sum, local);
}

int main()
{
    double pi, sum = 0.0;
    double start_time, run_time;
    step = 1.0 / (double) num_steps;
    start_time = omp_get_wtime();
    double *dev_sum;
    // capture the start time
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    //
    hipMalloc((void **)&dev_sum, sizeof(double));
    hipMemset(dev_sum, 0, sizeof(double));
    hipLaunchKernelGGL((cal_pi), dim3(1), dim3(1), 0, 0, num_steps, step, dev_sum);
    hipMemcpy(&sum, dev_sum, sizeof(double), hipMemcpyDeviceToHost);
    pi = step * sum;

    // get stop time, and display the timing results
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to compute : %3.1f ms\n", elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    run_time = omp_get_wtime() - start_time;
    printf("\n pi with %ld steps is %lf in %lf seconds\n ", num_steps, pi, run_time);
}
fef96799ff0a37577e2c13897572d65905ae0d50.cu
#include <stdio.h>
#include "omp_repair.h"
#include <cuda.h>

static long num_steps = 1000000; // 1 million steps
double step;

#if __CUDA_ARCH__ < 600
__device__ double atomicAdd2(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif

__global__ void cal_pi(long num_steps, double step, double *sum)
{
    int i;
    double x;
    double local;
    local = 0.0;
    for (i = 1; i <= num_steps; i++) {
        x = (i - 0.5) * step;
        local += 4.0 / (1.0 + x * x);
    }
    atomicAdd2(sum, local);
}

int main()
{
    double pi, sum = 0.0;
    double start_time, run_time;
    step = 1.0 / (double) num_steps;
    start_time = omp_get_wtime();
    double *dev_sum;
    // capture the start time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //
    cudaMalloc((void **)&dev_sum, sizeof(double));
    cudaMemset(dev_sum, 0, sizeof(double));
    cal_pi<<<1,1>>>(num_steps, step, dev_sum);
    cudaMemcpy(&sum, dev_sum, sizeof(double), cudaMemcpyDeviceToHost);
    pi = step * sum;

    // get stop time, and display the timing results
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to compute : %3.1f ms\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    run_time = omp_get_wtime() - start_time;
    printf("\n pi with %ld steps is %lf in %lf seconds\n ", num_steps, pi, run_time);
}
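The cal_pi kernel in both versions above is launched with a single block of a single thread, so the whole midpoint-rule sum runs serially on the device. Below is a sketch (kernel name and launch shape are illustrative, not from the source) of how the same integral could be spread over many threads with a grid-stride loop, still funnelling partial sums through the CAS-based atomicAdd2 helper defined above (which that file only compiles when __CUDA_ARCH__ < 600).

// Sketch only: grid-stride version of the midpoint-rule integration.
// Relies on the atomicAdd2() helper from the file above being visible here.
__global__ void cal_pi_parallel(long num_steps, double step, double *sum)
{
    // Each thread integrates a strided subset of midpoints, then adds its
    // partial sum once, so contention on *sum is one atomic per thread.
    double local = 0.0;
    long stride = (long)blockDim.x * gridDim.x;
    for (long i = blockIdx.x * blockDim.x + threadIdx.x + 1; i <= num_steps; i += stride) {
        double x = (i - 0.5) * step;
        local += 4.0 / (1.0 + x * x);
    }
    atomicAdd2(sum, local);
}

// Possible launch replacing the <<<1,1>>> call above (illustrative):
//   cal_pi_parallel<<<256, 256>>>(num_steps, step, dev_sum);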
7ee8919976f6d01edc6ca1339e456b228bc7beee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define ARRAY(x,y) array[(x)-1+((y)-1)*PME_ORDER] /** * Calculate the spline coefficients for a single atom along a single axis. */ __device__ void computeBSplinePoint(real4* thetai, real w, real* array) { // initialization to get to 2nd order recursion ARRAY(2,2) = w; ARRAY(2,1) = 1 - w; // perform one pass to get to 3rd order recursion ARRAY(3,3) = 0.5f * w * ARRAY(2,2); ARRAY(3,2) = 0.5f * ((1+w)*ARRAY(2,1)+(2-w)*ARRAY(2,2)); ARRAY(3,1) = 0.5f * (1-w) * ARRAY(2,1); // compute standard B-spline recursion to desired order for (int i = 4; i <= PME_ORDER; i++) { int k = i - 1; real denom = RECIP(k); ARRAY(i,i) = denom * w * ARRAY(k,k); for (int j = 1; j <= i-2; j++) ARRAY(i,i-j) = denom * ((w+j)*ARRAY(k,i-j-1)+(i-j-w)*ARRAY(k,i-j)); ARRAY(i,1) = denom * (1-w) * ARRAY(k,1); } // get coefficients for the B-spline first derivative int k = PME_ORDER - 1; ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // get coefficients for the B-spline second derivative k = PME_ORDER - 2; ARRAY(k,PME_ORDER-1) = ARRAY(k,PME_ORDER-2); for (int i = PME_ORDER-2; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // get coefficients for the B-spline third derivative k = PME_ORDER - 3; ARRAY(k,PME_ORDER-2) = ARRAY(k,PME_ORDER-3); for (int i = PME_ORDER-3; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER-1) = ARRAY(k,PME_ORDER-2); for (int i = PME_ORDER-2; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // copy coefficients from temporary to permanent storage for (int i = 1; i <= PME_ORDER; i++) thetai[i-1] = make_real4(ARRAY(PME_ORDER,i), ARRAY(PME_ORDER-1,i), ARRAY(PME_ORDER-2,i), ARRAY(PME_ORDER-3,i)); } /** * Compute the index of the grid point each atom is associated with. */ extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { real4 pos = posq[i]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // First axis. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) fr; int igrid1 = ifr-PME_ORDER+1; // Second axis. w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) fr; int igrid2 = ifr-PME_ORDER+1; // Third axis. w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) fr; int igrid3 = ifr-PME_ORDER+1; // Record the grid point. igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? 
GRID_SIZE_Z : 0); pmeAtomGridIndex[i] = make_int2(i, igrid1*GRID_SIZE_Y*GRID_SIZE_Z+igrid2*GRID_SIZE_Z+igrid3); } } /** * Convert the fixed multipoles from Cartesian to fractional coordinates. */ extern "C" __global__ void transformMultipolesToFractionalCoordinates(const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, real* __restrict__ fracDipole, real* __restrict__ fracQuadrupole, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Build matrices for transforming the dipoles and quadrupoles. __shared__ real a[3][3]; if (threadIdx.x == 0) { a[0][0] = GRID_SIZE_X*recipBoxVecX.x; a[0][1] = GRID_SIZE_X*recipBoxVecY.x; a[0][2] = GRID_SIZE_X*recipBoxVecZ.x; a[1][0] = GRID_SIZE_Y*recipBoxVecX.y; a[1][1] = GRID_SIZE_Y*recipBoxVecY.y; a[1][2] = GRID_SIZE_Y*recipBoxVecZ.y; a[2][0] = GRID_SIZE_Z*recipBoxVecX.z; a[2][1] = GRID_SIZE_Z*recipBoxVecY.z; a[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); int index1[] = {0, 0, 0, 1, 1, 2}; int index2[] = {0, 1, 2, 1, 2, 2}; __shared__ real b[6][6]; if (threadIdx.x < 36) { int i = threadIdx.x/6; int j = threadIdx.x-6*i; b[i][j] = a[index1[i]][index1[j]]*a[index2[i]][index2[j]]; if (index1[i] != index2[i]) b[i][j] += a[index1[i]][index2[j]]*a[index2[i]][index1[j]]; } __syncthreads(); // Transform the multipoles. real quadScale[] = {1, 2, 2, 1, 2, 1}; for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { for (int j = 0; j < 3; j++) { real dipole = 0; for (int k = 0; k < 3; k++) dipole += a[j][k]*labFrameDipole[3*i+k]; fracDipole[3*i+j] = dipole; } for (int j = 0; j < 6; j++) { real quadrupole = 0; for (int k = 0; k < 5; k++) quadrupole += quadScale[k]*b[j][k]*labFrameQuadrupole[5*i+k]; quadrupole -= quadScale[5]*b[j][5]*(labFrameQuadrupole[5*i]+labFrameQuadrupole[5*i+3]); fracQuadrupole[6*i+j] = quadrupole; } } } /** * Convert the potential from fractional to Cartesian coordinates. */ extern "C" __global__ void transformPotentialToCartesianCoordinates(const real* __restrict__ fphi, real* __restrict__ cphi, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Build matrices for transforming the potential. __shared__ real a[3][3]; if (threadIdx.x == 0) { a[0][0] = GRID_SIZE_X*recipBoxVecX.x; a[1][0] = GRID_SIZE_X*recipBoxVecY.x; a[2][0] = GRID_SIZE_X*recipBoxVecZ.x; a[0][1] = GRID_SIZE_Y*recipBoxVecX.y; a[1][1] = GRID_SIZE_Y*recipBoxVecY.y; a[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; a[0][2] = GRID_SIZE_Z*recipBoxVecX.z; a[1][2] = GRID_SIZE_Z*recipBoxVecY.z; a[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); int index1[] = {0, 1, 2, 0, 0, 1}; int index2[] = {0, 1, 2, 1, 2, 2}; __shared__ real b[6][6]; if (threadIdx.x < 36) { int i = threadIdx.x/6; int j = threadIdx.x-6*i; b[i][j] = a[index1[i]][index1[j]]*a[index2[i]][index2[j]]; if (index1[j] != index2[j]) b[i][j] += (i < 3 ? b[i][j] : a[index1[i]][index2[j]]*a[index2[i]][index1[j]]); } __syncthreads(); // Transform the potential. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { cphi[10*i] = fphi[20*i]; cphi[10*i+1] = a[0][0]*fphi[20*i+1] + a[0][1]*fphi[20*i+2] + a[0][2]*fphi[20*i+3]; cphi[10*i+2] = a[1][0]*fphi[20*i+1] + a[1][1]*fphi[20*i+2] + a[1][2]*fphi[20*i+3]; cphi[10*i+3] = a[2][0]*fphi[20*i+1] + a[2][1]*fphi[20*i+2] + a[2][2]*fphi[20*i+3]; for (int j = 0; j < 6; j++) { cphi[10*i+4+j] = 0; for (int k = 0; k < 6; k++) cphi[10*i+4+j] += b[j][k]*fphi[20*i+4+k]; } } } extern "C" __global__ void gridSpreadFixedMultipoles(const real4* __restrict__ posq, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, real2* __restrict__ pmeGrid, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = igrid1+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real4 t = theta1[ix]; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = igrid2+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real4 u = theta2[iy]; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = igrid3+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real4 v = theta3[iz]; real atomCharge = pos.w; real atomDipoleX = fracDipole[m*3]; real atomDipoleY = fracDipole[m*3+1]; real atomDipoleZ = fracDipole[m*3+2]; real atomQuadrupoleXX = fracQuadrupole[m*6]; real atomQuadrupoleXY = fracQuadrupole[m*6+1]; real atomQuadrupoleXZ = fracQuadrupole[m*6+2]; real atomQuadrupoleYY = fracQuadrupole[m*6+3]; real atomQuadrupoleYZ = fracQuadrupole[m*6+4]; real atomQuadrupoleZZ = fracQuadrupole[m*6+5]; real term0 = atomCharge*u.x*v.x + atomDipoleY*u.y*v.x + atomDipoleZ*u.x*v.y + atomQuadrupoleYY*u.z*v.x + atomQuadrupoleZZ*u.x*v.z + atomQuadrupoleYZ*u.y*v.y; real term1 = atomDipoleX*u.x*v.x + atomQuadrupoleXY*u.y*v.x + atomQuadrupoleXZ*u.x*v.y; real term2 = atomQuadrupoleXX * u.x * v.x; real add = term0*t.x + term1*t.y + term2*t.z; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) pmeGrid; atomicAdd(&ulonglong_p[2*index], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&pmeGrid[index].x, add); #endif } } } } } extern "C" __global__ void gridSpreadInducedDipoles(const real4* __restrict__ posq, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar, real2* __restrict__ pmeGrid, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; __shared__ real cartToFrac[3][3]; if (threadIdx.x == 0) { cartToFrac[0][0] = GRID_SIZE_X*recipBoxVecX.x; cartToFrac[0][1] = GRID_SIZE_X*recipBoxVecY.x; cartToFrac[0][2] = GRID_SIZE_X*recipBoxVecZ.x; cartToFrac[1][0] = GRID_SIZE_Y*recipBoxVecX.y; cartToFrac[1][1] = GRID_SIZE_Y*recipBoxVecY.y; cartToFrac[1][2] = GRID_SIZE_Y*recipBoxVecZ.y; cartToFrac[2][0] = GRID_SIZE_Z*recipBoxVecX.z; cartToFrac[2][1] = GRID_SIZE_Z*recipBoxVecY.z; cartToFrac[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = igrid1+ix; xbase -= (xbase >= GRID_SIZE_X ? 
GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real4 t = theta1[ix]; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = igrid2+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real4 u = theta2[iy]; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = igrid3+iz; zindex -= (zindex >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); int index = ybase + zindex; real4 v = theta3[iz]; real3 cinducedDipole = make_real3(inducedDipole[m*3], inducedDipole[m*3+1], inducedDipole[m*3+2]); real3 cinducedDipolePolar = make_real3(inducedDipolePolar[m*3], inducedDipolePolar[m*3+1], inducedDipolePolar[m*3+2]); real3 finducedDipole = make_real3(cinducedDipole.x*cartToFrac[0][0] + cinducedDipole.y*cartToFrac[0][1] + cinducedDipole.z*cartToFrac[0][2], cinducedDipole.x*cartToFrac[1][0] + cinducedDipole.y*cartToFrac[1][1] + cinducedDipole.z*cartToFrac[1][2], cinducedDipole.x*cartToFrac[2][0] + cinducedDipole.y*cartToFrac[2][1] + cinducedDipole.z*cartToFrac[2][2]); real3 finducedDipolePolar = make_real3(cinducedDipolePolar.x*cartToFrac[0][0] + cinducedDipolePolar.y*cartToFrac[0][1] + cinducedDipolePolar.z*cartToFrac[0][2], cinducedDipolePolar.x*cartToFrac[1][0] + cinducedDipolePolar.y*cartToFrac[1][1] + cinducedDipolePolar.z*cartToFrac[1][2], cinducedDipolePolar.x*cartToFrac[2][0] + cinducedDipolePolar.y*cartToFrac[2][1] + cinducedDipolePolar.z*cartToFrac[2][2]); real term01 = finducedDipole.y*u.y*v.x + finducedDipole.z*u.x*v.y; real term11 = finducedDipole.x*u.x*v.x; real term02 = finducedDipolePolar.y*u.y*v.x + finducedDipolePolar.z*u.x*v.y; real term12 = finducedDipolePolar.x*u.x*v.x; real add1 = term01*t.x + term11*t.y; real add2 = term02*t.x + term12*t.y; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) pmeGrid; atomicAdd(&ulonglong_p[2*index], static_cast<unsigned long long>((long long) (add1*0x100000000))); atomicAdd(&ulonglong_p[2*index+1], static_cast<unsigned long long>((long long) (add2*0x100000000))); #else atomicAdd(&pmeGrid[index].x, add1); atomicAdd(&pmeGrid[index].y, add2); #endif } } } } } /** * In double precision, we have to use fixed point to accumulate the grid values, so convert them to floating point. */ extern "C" __global__ void finishSpreadCharge(long long* __restrict__ pmeGrid) { real* floatGrid = (real*) pmeGrid; const unsigned int gridSize = 2*GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = 1/(real) 0x100000000; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) floatGrid[index] = scale*pmeGrid[index]; } extern "C" __global__ void reciprocalConvolution(real2* __restrict__ pmeGrid, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real expFactor = M_PI*M_PI/(EWALD_ALPHA*EWALD_ALPHA); real scaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { int kx = index/(GRID_SIZE_Y*GRID_SIZE_Z); int remainder = index-kx*GRID_SIZE_Y*GRID_SIZE_Z; int ky = remainder/GRID_SIZE_Z; int kz = remainder-ky*GRID_SIZE_Z; if (kx == 0 && ky == 0 && kz == 0) { pmeGrid[index] = make_real2(0, 0); continue; } int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? 
ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real denom = m2*bx*by*bz; real eterm = scaleFactor*EXP(-expFactor*m2)/denom; pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } } extern "C" __global__ void computeFixedPotentialFromGrid(const real2* __restrict__ pmeGrid, real* __restrict__ phi, long long* __restrict__ fieldBuffers, long long* __restrict__ fieldPolarBuffers, const real4* __restrict__ posq, const real* __restrict__ labFrameDipole, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, int2* __restrict__ pmeAtomGridIndex) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Compute the potential from this grid point. real tuv000 = 0; real tuv001 = 0; real tuv010 = 0; real tuv100 = 0; real tuv200 = 0; real tuv020 = 0; real tuv002 = 0; real tuv110 = 0; real tuv101 = 0; real tuv011 = 0; real tuv300 = 0; real tuv030 = 0; real tuv003 = 0; real tuv210 = 0; real tuv201 = 0; real tuv120 = 0; real tuv021 = 0; real tuv102 = 0; real tuv012 = 0; real tuv111 = 0; for (int iz = 0; iz < PME_ORDER; iz++) { int k = igrid3+iz-(igrid3+iz >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); real4 v = theta3[iz]; real tu00 = 0; real tu10 = 0; real tu01 = 0; real tu20 = 0; real tu11 = 0; real tu02 = 0; real tu30 = 0; real tu21 = 0; real tu12 = 0; real tu03 = 0; for (int iy = 0; iy < PME_ORDER; iy++) { int j = igrid2+iy-(igrid2+iy >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); real4 u = theta2[iy]; real4 t = make_real4(0, 0, 0, 0); for (int ix = 0; ix < PME_ORDER; ix++) { int i = igrid1+ix-(igrid1+ix >= GRID_SIZE_X ? GRID_SIZE_X : 0); int gridIndex = i*GRID_SIZE_Y*GRID_SIZE_Z + j*GRID_SIZE_Z + k; real tq = pmeGrid[gridIndex].x; real4 tadd = theta1[ix]; t.x += tq*tadd.x; t.y += tq*tadd.y; t.z += tq*tadd.z; t.w += tq*tadd.w; } tu00 += t.x*u.x; tu10 += t.y*u.x; tu01 += t.x*u.y; tu20 += t.z*u.x; tu11 += t.y*u.y; tu02 += t.x*u.z; tu30 += t.w*u.x; tu21 += t.z*u.y; tu12 += t.y*u.z; tu03 += t.x*u.w; } tuv000 += tu00*v.x; tuv100 += tu10*v.x; tuv010 += tu01*v.x; tuv001 += tu00*v.y; tuv200 += tu20*v.x; tuv020 += tu02*v.x; tuv002 += tu00*v.z; tuv110 += tu11*v.x; tuv101 += tu10*v.y; tuv011 += tu01*v.y; tuv300 += tu30*v.x; tuv030 += tu03*v.x; tuv003 += tu00*v.w; tuv210 += tu21*v.x; tuv201 += tu20*v.y; tuv120 += tu12*v.x; tuv021 += tu02*v.y; tuv102 += tu10*v.z; tuv012 += tu01*v.z; tuv111 += tu11*v.y; } phi[20*m] = tuv000; phi[20*m+1] = tuv100; phi[20*m+2] = tuv010; phi[20*m+3] = tuv001; phi[20*m+4] = tuv200; phi[20*m+5] = tuv020; phi[20*m+6] = tuv002; phi[20*m+7] = tuv110; phi[20*m+8] = tuv101; phi[20*m+9] = tuv011; phi[20*m+10] = tuv300; phi[20*m+11] = tuv030; phi[20*m+12] = tuv003; phi[20*m+13] = tuv210; phi[20*m+14] = tuv201; phi[20*m+15] = tuv120; phi[20*m+16] = tuv021; phi[20*m+17] = tuv102; phi[20*m+18] = tuv012; phi[20*m+19] = tuv111; real dipoleScale = (4/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI; long long fieldx = (long long) ((dipoleScale*labFrameDipole[m*3]-tuv100*fracToCart[0][0]-tuv010*fracToCart[0][1]-tuv001*fracToCart[0][2])*0x100000000); fieldBuffers[m] = fieldx; fieldPolarBuffers[m] = fieldx; long long fieldy = (long long) ((dipoleScale*labFrameDipole[m*3+1]-tuv100*fracToCart[1][0]-tuv010*fracToCart[1][1]-tuv001*fracToCart[1][2])*0x100000000); fieldBuffers[m+PADDED_NUM_ATOMS] = fieldy; fieldPolarBuffers[m+PADDED_NUM_ATOMS] = fieldy; long long fieldz = (long long) ((dipoleScale*labFrameDipole[m*3+2]-tuv100*fracToCart[2][0]-tuv010*fracToCart[2][1]-tuv001*fracToCart[2][2])*0x100000000); fieldBuffers[m+2*PADDED_NUM_ATOMS] = fieldz; fieldPolarBuffers[m+2*PADDED_NUM_ATOMS] = fieldz; } } extern "C" __global__ void computeInducedPotentialFromGrid(const real2* __restrict__ pmeGrid, real* __restrict__ phid, real* __restrict__ phip, real* __restrict__ phidp, const real4* __restrict__ posq, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, int2* __restrict__ pmeAtomGridIndex) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Compute the potential from this grid point. real tuv100_1 = 0; real tuv010_1 = 0; real tuv001_1 = 0; real tuv200_1 = 0; real tuv020_1 = 0; real tuv002_1 = 0; real tuv110_1 = 0; real tuv101_1 = 0; real tuv011_1 = 0; real tuv100_2 = 0; real tuv010_2 = 0; real tuv001_2 = 0; real tuv200_2 = 0; real tuv020_2 = 0; real tuv002_2 = 0; real tuv110_2 = 0; real tuv101_2 = 0; real tuv011_2 = 0; real tuv000 = 0; real tuv001 = 0; real tuv010 = 0; real tuv100 = 0; real tuv200 = 0; real tuv020 = 0; real tuv002 = 0; real tuv110 = 0; real tuv101 = 0; real tuv011 = 0; real tuv300 = 0; real tuv030 = 0; real tuv003 = 0; real tuv210 = 0; real tuv201 = 0; real tuv120 = 0; real tuv021 = 0; real tuv102 = 0; real tuv012 = 0; real tuv111 = 0; for (int iz = 0; iz < PME_ORDER; iz++) { int k = igrid3+iz-(igrid3+iz >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); real4 v = theta3[iz]; real tu00_1 = 0; real tu01_1 = 0; real tu10_1 = 0; real tu20_1 = 0; real tu11_1 = 0; real tu02_1 = 0; real tu00_2 = 0; real tu01_2 = 0; real tu10_2 = 0; real tu20_2 = 0; real tu11_2 = 0; real tu02_2 = 0; real tu00 = 0; real tu10 = 0; real tu01 = 0; real tu20 = 0; real tu11 = 0; real tu02 = 0; real tu30 = 0; real tu21 = 0; real tu12 = 0; real tu03 = 0; for (int iy = 0; iy < PME_ORDER; iy++) { int j = igrid2+iy-(igrid2+iy >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); real4 u = theta2[iy]; real t0_1 = 0; real t1_1 = 0; real t2_1 = 0; real t0_2 = 0; real t1_2 = 0; real t2_2 = 0; real t3 = 0; for (int ix = 0; ix < PME_ORDER; ix++) { int i = igrid1+ix-(igrid1+ix >= GRID_SIZE_X ? 
GRID_SIZE_X : 0); int gridIndex = i*GRID_SIZE_Y*GRID_SIZE_Z + j*GRID_SIZE_Z + k; real2 tq = pmeGrid[gridIndex]; real4 tadd = theta1[ix]; t0_1 += tq.x*tadd.x; t1_1 += tq.x*tadd.y; t2_1 += tq.x*tadd.z; t0_2 += tq.y*tadd.x; t1_2 += tq.y*tadd.y; t2_2 += tq.y*tadd.z; t3 += (tq.x+tq.y)*tadd.w; } tu00_1 += t0_1*u.x; tu10_1 += t1_1*u.x; tu01_1 += t0_1*u.y; tu20_1 += t2_1*u.x; tu11_1 += t1_1*u.y; tu02_1 += t0_1*u.z; tu00_2 += t0_2*u.x; tu10_2 += t1_2*u.x; tu01_2 += t0_2*u.y; tu20_2 += t2_2*u.x; tu11_2 += t1_2*u.y; tu02_2 += t0_2*u.z; real t0 = t0_1 + t0_2; real t1 = t1_1 + t1_2; real t2 = t2_1 + t2_2; tu00 += t0*u.x; tu10 += t1*u.x; tu01 += t0*u.y; tu20 += t2*u.x; tu11 += t1*u.y; tu02 += t0*u.z; tu30 += t3*u.x; tu21 += t2*u.y; tu12 += t1*u.z; tu03 += t0*u.w; } tuv100_1 += tu10_1*v.x; tuv010_1 += tu01_1*v.x; tuv001_1 += tu00_1*v.y; tuv200_1 += tu20_1*v.x; tuv020_1 += tu02_1*v.x; tuv002_1 += tu00_1*v.z; tuv110_1 += tu11_1*v.x; tuv101_1 += tu10_1*v.y; tuv011_1 += tu01_1*v.y; tuv100_2 += tu10_2*v.x; tuv010_2 += tu01_2*v.x; tuv001_2 += tu00_2*v.y; tuv200_2 += tu20_2*v.x; tuv020_2 += tu02_2*v.x; tuv002_2 += tu00_2*v.z; tuv110_2 += tu11_2*v.x; tuv101_2 += tu10_2*v.y; tuv011_2 += tu01_2*v.y; tuv000 += tu00*v.x; tuv100 += tu10*v.x; tuv010 += tu01*v.x; tuv001 += tu00*v.y; tuv200 += tu20*v.x; tuv020 += tu02*v.x; tuv002 += tu00*v.z; tuv110 += tu11*v.x; tuv101 += tu10*v.y; tuv011 += tu01*v.y; tuv300 += tu30*v.x; tuv030 += tu03*v.x; tuv003 += tu00*v.w; tuv210 += tu21*v.x; tuv201 += tu20*v.y; tuv120 += tu12*v.x; tuv021 += tu02*v.y; tuv102 += tu10*v.z; tuv012 += tu01*v.z; tuv111 += tu11*v.y; } phid[10*m] = 0; phid[10*m+1] = tuv100_1; phid[10*m+2] = tuv010_1; phid[10*m+3] = tuv001_1; phid[10*m+4] = tuv200_1; phid[10*m+5] = tuv020_1; phid[10*m+6] = tuv002_1; phid[10*m+7] = tuv110_1; phid[10*m+8] = tuv101_1; phid[10*m+9] = tuv011_1; phip[10*m] = 0; phip[10*m+1] = tuv100_2; phip[10*m+2] = tuv010_2; phip[10*m+3] = tuv001_2; phip[10*m+4] = tuv200_2; phip[10*m+5] = tuv020_2; phip[10*m+6] = tuv002_2; phip[10*m+7] = tuv110_2; phip[10*m+8] = tuv101_2; phip[10*m+9] = tuv011_2; phidp[20*m] = tuv000; phidp[20*m+1] = tuv100; phidp[20*m+2] = tuv010; phidp[20*m+3] = tuv001; phidp[20*m+4] = tuv200; phidp[20*m+5] = tuv020; phidp[20*m+6] = tuv002; phidp[20*m+7] = tuv110; phidp[20*m+8] = tuv101; phidp[20*m+9] = tuv011; phidp[20*m+10] = tuv300; phidp[20*m+11] = tuv030; phidp[20*m+12] = tuv003; phidp[20*m+13] = tuv210; phidp[20*m+14] = tuv201; phidp[20*m+15] = tuv120; phidp[20*m+16] = tuv021; phidp[20*m+17] = tuv102; phidp[20*m+18] = tuv012; phidp[20*m+19] = tuv111; } } extern "C" __global__ void computeFixedMultipoleForceAndEnergy(real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, long long* __restrict__ torqueBuffers, real* __restrict__ energyBuffer, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, const real* __restrict__ phi_global, const real* __restrict__ cphi_global, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real multipole[10]; const int deriv1[] = {1, 4, 7, 8, 10, 15, 17, 13, 14, 19}; const int deriv2[] = {2, 7, 5, 9, 13, 11, 18, 15, 19, 16}; const int deriv3[] = {3, 8, 9, 6, 14, 16, 12, 19, 17, 18}; real energy = 0; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; 
fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { // Compute the torque. multipole[0] = posq[i].w; multipole[1] = labFrameDipole[i*3]; multipole[2] = labFrameDipole[i*3+1]; multipole[3] = labFrameDipole[i*3+2]; multipole[4] = labFrameQuadrupole[i*5]; multipole[5] = labFrameQuadrupole[i*5+3]; multipole[6] = -(multipole[4]+multipole[5]); multipole[7] = 2*labFrameQuadrupole[i*5+1]; multipole[8] = 2*labFrameQuadrupole[i*5+2]; multipole[9] = 2*labFrameQuadrupole[i*5+4]; const real* cphi = &cphi_global[10*i]; torqueBuffers[i] = (long long) (EPSILON_FACTOR*(multipole[3]*cphi[2] - multipole[2]*cphi[3] + 2*(multipole[6]-multipole[5])*cphi[9] + multipole[8]*cphi[7] + multipole[9]*cphi[5] - multipole[7]*cphi[8] - multipole[9]*cphi[6])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS] = (long long) (EPSILON_FACTOR*(multipole[1]*cphi[3] - multipole[3]*cphi[1] + 2*(multipole[4]-multipole[6])*cphi[8] + multipole[7]*cphi[9] + multipole[8]*cphi[6] - multipole[8]*cphi[4] - multipole[9]*cphi[7])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS*2] = (long long) (EPSILON_FACTOR*(multipole[2]*cphi[1] - multipole[1]*cphi[2] + 2*(multipole[5]-multipole[4])*cphi[7] + multipole[7]*cphi[4] + multipole[9]*cphi[8] - multipole[7]*cphi[5] - multipole[8]*cphi[9])*0x100000000); // Compute the force and energy. multipole[1] = fracDipole[i*3]; multipole[2] = fracDipole[i*3+1]; multipole[3] = fracDipole[i*3+2]; multipole[4] = fracQuadrupole[i*6]; multipole[5] = fracQuadrupole[i*6+3]; multipole[6] = fracQuadrupole[i*6+5]; multipole[7] = fracQuadrupole[i*6+1]; multipole[8] = fracQuadrupole[i*6+2]; multipole[9] = fracQuadrupole[i*6+4]; const real* phi = &phi_global[20*i]; real4 f = make_real4(0, 0, 0, 0); for (int k = 0; k < 10; k++) { energy += multipole[k]*phi[k]; f.x += multipole[k]*phi[deriv1[k]]; f.y += multipole[k]*phi[deriv2[k]]; f.z += multipole[k]*phi[deriv3[k]]; } f = make_real4(EPSILON_FACTOR*(f.x*fracToCart[0][0] + f.y*fracToCart[0][1] + f.z*fracToCart[0][2]), EPSILON_FACTOR*(f.x*fracToCart[1][0] + f.y*fracToCart[1][1] + f.z*fracToCart[1][2]), EPSILON_FACTOR*(f.x*fracToCart[2][0] + f.y*fracToCart[2][1] + f.z*fracToCart[2][2]), 0); forceBuffers[i] -= static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS] -= static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS*2] -= static_cast<unsigned long long>((long long) (f.z*0x100000000)); } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*EPSILON_FACTOR*energy; } extern "C" __global__ void computeInducedDipoleForceAndEnergy(real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, long long* __restrict__ torqueBuffers, real* __restrict__ energyBuffer, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, const real* __restrict__ inducedDipole_global, const real* __restrict__ inducedDipolePolar_global, const real* __restrict__ phi_global, const real* __restrict__ phid_global, const real* __restrict__ phip_global, const real* __restrict__ phidp_global, const real* __restrict__ cphi_global, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real multipole[10]; real 
cinducedDipole[3], inducedDipole[3]; real cinducedDipolePolar[3], inducedDipolePolar[3]; const int deriv1[] = {1, 4, 7, 8, 10, 15, 17, 13, 14, 19}; const int deriv2[] = {2, 7, 5, 9, 13, 11, 18, 15, 19, 16}; const int deriv3[] = {3, 8, 9, 6, 14, 16, 12, 19, 17, 18}; real energy = 0; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { // Compute the torque. multipole[0] = posq[i].w; multipole[1] = labFrameDipole[i*3]; multipole[2] = labFrameDipole[i*3+1]; multipole[3] = labFrameDipole[i*3+2]; multipole[4] = labFrameQuadrupole[i*5]; multipole[5] = labFrameQuadrupole[i*5+3]; multipole[6] = -(multipole[4]+multipole[5]); multipole[7] = 2*labFrameQuadrupole[i*5+1]; multipole[8] = 2*labFrameQuadrupole[i*5+2]; multipole[9] = 2*labFrameQuadrupole[i*5+4]; const real* cphi = &cphi_global[10*i]; torqueBuffers[i] += (long long) (0.5f*EPSILON_FACTOR*(multipole[3]*cphi[2] - multipole[2]*cphi[3] + 2*(multipole[6]-multipole[5])*cphi[9] + multipole[8]*cphi[7] + multipole[9]*cphi[5] - multipole[7]*cphi[8] - multipole[9]*cphi[6])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS] += (long long) (0.5f*EPSILON_FACTOR*(multipole[1]*cphi[3] - multipole[3]*cphi[1] + 2*(multipole[4]-multipole[6])*cphi[8] + multipole[7]*cphi[9] + multipole[8]*cphi[6] - multipole[8]*cphi[4] - multipole[9]*cphi[7])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS*2] += (long long) (0.5f*EPSILON_FACTOR*(multipole[2]*cphi[1] - multipole[1]*cphi[2] + 2*(multipole[5]-multipole[4])*cphi[7] + multipole[7]*cphi[4] + multipole[9]*cphi[8] - multipole[7]*cphi[5] - multipole[8]*cphi[9])*0x100000000); // Compute the force and energy. multipole[1] = fracDipole[i*3]; multipole[2] = fracDipole[i*3+1]; multipole[3] = fracDipole[i*3+2]; multipole[4] = fracQuadrupole[i*6]; multipole[5] = fracQuadrupole[i*6+3]; multipole[6] = fracQuadrupole[i*6+5]; multipole[7] = fracQuadrupole[i*6+1]; multipole[8] = fracQuadrupole[i*6+2]; multipole[9] = fracQuadrupole[i*6+4]; cinducedDipole[0] = inducedDipole_global[i*3]; cinducedDipole[1] = inducedDipole_global[i*3+1]; cinducedDipole[2] = inducedDipole_global[i*3+2]; cinducedDipolePolar[0] = inducedDipolePolar_global[i*3]; cinducedDipolePolar[1] = inducedDipolePolar_global[i*3+1]; cinducedDipolePolar[2] = inducedDipolePolar_global[i*3+2]; // Multiply the dipoles by cartToFrac, which is just the transpose of fracToCart. 
inducedDipole[0] = cinducedDipole[0]*fracToCart[0][0] + cinducedDipole[1]*fracToCart[1][0] + cinducedDipole[2]*fracToCart[2][0]; inducedDipole[1] = cinducedDipole[0]*fracToCart[0][1] + cinducedDipole[1]*fracToCart[1][1] + cinducedDipole[2]*fracToCart[2][1]; inducedDipole[2] = cinducedDipole[0]*fracToCart[0][2] + cinducedDipole[1]*fracToCart[1][2] + cinducedDipole[2]*fracToCart[2][2]; inducedDipolePolar[0] = cinducedDipolePolar[0]*fracToCart[0][0] + cinducedDipolePolar[1]*fracToCart[1][0] + cinducedDipolePolar[2]*fracToCart[2][0]; inducedDipolePolar[1] = cinducedDipolePolar[0]*fracToCart[0][1] + cinducedDipolePolar[1]*fracToCart[1][1] + cinducedDipolePolar[2]*fracToCart[2][1]; inducedDipolePolar[2] = cinducedDipolePolar[0]*fracToCart[0][2] + cinducedDipolePolar[1]*fracToCart[1][2] + cinducedDipolePolar[2]*fracToCart[2][2]; const real* phi = &phi_global[20*i]; const real* phip = &phip_global[10*i]; const real* phid = &phid_global[10*i]; real4 f = make_real4(0, 0, 0, 0); energy += inducedDipole[0]*phi[1]; energy += inducedDipole[1]*phi[2]; energy += inducedDipole[2]*phi[3]; for (int k = 0; k < 3; k++) { int j1 = deriv1[k+1]; int j2 = deriv2[k+1]; int j3 = deriv3[k+1]; f.x += (inducedDipole[k]+inducedDipolePolar[k])*phi[j1]; f.y += (inducedDipole[k]+inducedDipolePolar[k])*phi[j2]; f.z += (inducedDipole[k]+inducedDipolePolar[k])*phi[j3]; #ifndef DIRECT_POLARIZATION f.x += (inducedDipole[k]*phip[j1] + inducedDipolePolar[k]*phid[j1]); f.y += (inducedDipole[k]*phip[j2] + inducedDipolePolar[k]*phid[j2]); f.z += (inducedDipole[k]*phip[j3] + inducedDipolePolar[k]*phid[j3]); #endif } const real* phidp = &phidp_global[20*i]; for (int k = 0; k < 10; k++) { f.x += multipole[k]*phidp[deriv1[k]]; f.y += multipole[k]*phidp[deriv2[k]]; f.z += multipole[k]*phidp[deriv3[k]]; } f = make_real4(0.5f*EPSILON_FACTOR*(f.x*fracToCart[0][0] + f.y*fracToCart[0][1] + f.z*fracToCart[0][2]), 0.5f*EPSILON_FACTOR*(f.x*fracToCart[1][0] + f.y*fracToCart[1][1] + f.z*fracToCart[1][2]), 0.5f*EPSILON_FACTOR*(f.x*fracToCart[2][0] + f.y*fracToCart[2][1] + f.z*fracToCart[2][2]), 0); forceBuffers[i] -= static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS] -= static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS*2] -= static_cast<unsigned long long>((long long) (f.z*0x100000000)); } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*EPSILON_FACTOR*energy; } extern "C" __global__ void recordInducedFieldDipoles(const real* __restrict__ phid, real* const __restrict__ phip, long long* __restrict__ inducedField, long long* __restrict__ inducedFieldPolar, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { inducedField[i] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[0][0] + phid[10*i+2]*fracToCart[0][1] + phid[10*i+3]*fracToCart[0][2])); inducedField[i+PADDED_NUM_ATOMS] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[1][0] + 
phid[10*i+2]*fracToCart[1][1] + phid[10*i+3]*fracToCart[1][2])); inducedField[i+PADDED_NUM_ATOMS*2] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[2][0] + phid[10*i+2]*fracToCart[2][1] + phid[10*i+3]*fracToCart[2][2])); inducedFieldPolar[i] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[0][0] + phip[10*i+2]*fracToCart[0][1] + phip[10*i+3]*fracToCart[0][2])); inducedFieldPolar[i+PADDED_NUM_ATOMS] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[1][0] + phip[10*i+2]*fracToCart[1][1] + phip[10*i+3]*fracToCart[1][2])); inducedFieldPolar[i+PADDED_NUM_ATOMS*2] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[2][0] + phip[10*i+2]*fracToCart[2][1] + phip[10*i+3]*fracToCart[2][2])); } }
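The spreading and force kernels in the file above accumulate real-valued contributions into 64-bit integer buffers: each contribution is scaled by 0x100000000 (2^32), added with an integer atomicAdd, and later divided by 2^32 again (see finishSpreadCharge). A minimal device-side sketch of that 32.32 fixed-point pattern follows; the helper names are illustrative, not from the source.

// Sketch of the 32.32 fixed-point accumulation used above (illustrative names).
__device__ inline void accumulateFixedPoint(long long* buffer, int index, double value)
{
    // Scale into the low 32 fractional bits and add atomically as an integer;
    // two's-complement wrap-around keeps signed sums correct.
    atomicAdd((unsigned long long*)&buffer[index],
              static_cast<unsigned long long>((long long)(value * 0x100000000)));
}

__device__ inline double readFixedPoint(const long long* buffer, int index)
{
    // Convert back to floating point by undoing the 2^32 scale.
    return buffer[index] / (double)0x100000000;
}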
7ee8919976f6d01edc6ca1339e456b228bc7beee.cu
#define ARRAY(x,y) array[(x)-1+((y)-1)*PME_ORDER] /** * Calculate the spline coefficients for a single atom along a single axis. */ __device__ void computeBSplinePoint(real4* thetai, real w, real* array) { // initialization to get to 2nd order recursion ARRAY(2,2) = w; ARRAY(2,1) = 1 - w; // perform one pass to get to 3rd order recursion ARRAY(3,3) = 0.5f * w * ARRAY(2,2); ARRAY(3,2) = 0.5f * ((1+w)*ARRAY(2,1)+(2-w)*ARRAY(2,2)); ARRAY(3,1) = 0.5f * (1-w) * ARRAY(2,1); // compute standard B-spline recursion to desired order for (int i = 4; i <= PME_ORDER; i++) { int k = i - 1; real denom = RECIP(k); ARRAY(i,i) = denom * w * ARRAY(k,k); for (int j = 1; j <= i-2; j++) ARRAY(i,i-j) = denom * ((w+j)*ARRAY(k,i-j-1)+(i-j-w)*ARRAY(k,i-j)); ARRAY(i,1) = denom * (1-w) * ARRAY(k,1); } // get coefficients for the B-spline first derivative int k = PME_ORDER - 1; ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // get coefficients for the B-spline second derivative k = PME_ORDER - 2; ARRAY(k,PME_ORDER-1) = ARRAY(k,PME_ORDER-2); for (int i = PME_ORDER-2; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // get coefficients for the B-spline third derivative k = PME_ORDER - 3; ARRAY(k,PME_ORDER-2) = ARRAY(k,PME_ORDER-3); for (int i = PME_ORDER-3; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER-1) = ARRAY(k,PME_ORDER-2); for (int i = PME_ORDER-2; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); ARRAY(k,PME_ORDER) = ARRAY(k,PME_ORDER-1); for (int i = PME_ORDER-1; i >= 2; i--) ARRAY(k,i) = ARRAY(k,i-1) - ARRAY(k,i); ARRAY(k,1) = -ARRAY(k,1); // copy coefficients from temporary to permanent storage for (int i = 1; i <= PME_ORDER; i++) thetai[i-1] = make_real4(ARRAY(PME_ORDER,i), ARRAY(PME_ORDER-1,i), ARRAY(PME_ORDER-2,i), ARRAY(PME_ORDER-3,i)); } /** * Compute the index of the grid point each atom is associated with. */ extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { real4 pos = posq[i]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // First axis. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) fr; int igrid1 = ifr-PME_ORDER+1; // Second axis. w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) fr; int igrid2 = ifr-PME_ORDER+1; // Third axis. w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) fr; int igrid3 = ifr-PME_ORDER+1; // Record the grid point. igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); pmeAtomGridIndex[i] = make_int2(i, igrid1*GRID_SIZE_Y*GRID_SIZE_Z+igrid2*GRID_SIZE_Z+igrid3); } } /** * Convert the fixed multipoles from Cartesian to fractional coordinates. 
*/ extern "C" __global__ void transformMultipolesToFractionalCoordinates(const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, real* __restrict__ fracDipole, real* __restrict__ fracQuadrupole, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Build matrices for transforming the dipoles and quadrupoles. __shared__ real a[3][3]; if (threadIdx.x == 0) { a[0][0] = GRID_SIZE_X*recipBoxVecX.x; a[0][1] = GRID_SIZE_X*recipBoxVecY.x; a[0][2] = GRID_SIZE_X*recipBoxVecZ.x; a[1][0] = GRID_SIZE_Y*recipBoxVecX.y; a[1][1] = GRID_SIZE_Y*recipBoxVecY.y; a[1][2] = GRID_SIZE_Y*recipBoxVecZ.y; a[2][0] = GRID_SIZE_Z*recipBoxVecX.z; a[2][1] = GRID_SIZE_Z*recipBoxVecY.z; a[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); int index1[] = {0, 0, 0, 1, 1, 2}; int index2[] = {0, 1, 2, 1, 2, 2}; __shared__ real b[6][6]; if (threadIdx.x < 36) { int i = threadIdx.x/6; int j = threadIdx.x-6*i; b[i][j] = a[index1[i]][index1[j]]*a[index2[i]][index2[j]]; if (index1[i] != index2[i]) b[i][j] += a[index1[i]][index2[j]]*a[index2[i]][index1[j]]; } __syncthreads(); // Transform the multipoles. real quadScale[] = {1, 2, 2, 1, 2, 1}; for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { for (int j = 0; j < 3; j++) { real dipole = 0; for (int k = 0; k < 3; k++) dipole += a[j][k]*labFrameDipole[3*i+k]; fracDipole[3*i+j] = dipole; } for (int j = 0; j < 6; j++) { real quadrupole = 0; for (int k = 0; k < 5; k++) quadrupole += quadScale[k]*b[j][k]*labFrameQuadrupole[5*i+k]; quadrupole -= quadScale[5]*b[j][5]*(labFrameQuadrupole[5*i]+labFrameQuadrupole[5*i+3]); fracQuadrupole[6*i+j] = quadrupole; } } } /** * Convert the potential from fractional to Cartesian coordinates. */ extern "C" __global__ void transformPotentialToCartesianCoordinates(const real* __restrict__ fphi, real* __restrict__ cphi, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Build matrices for transforming the potential. __shared__ real a[3][3]; if (threadIdx.x == 0) { a[0][0] = GRID_SIZE_X*recipBoxVecX.x; a[1][0] = GRID_SIZE_X*recipBoxVecY.x; a[2][0] = GRID_SIZE_X*recipBoxVecZ.x; a[0][1] = GRID_SIZE_Y*recipBoxVecX.y; a[1][1] = GRID_SIZE_Y*recipBoxVecY.y; a[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; a[0][2] = GRID_SIZE_Z*recipBoxVecX.z; a[1][2] = GRID_SIZE_Z*recipBoxVecY.z; a[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); int index1[] = {0, 1, 2, 0, 0, 1}; int index2[] = {0, 1, 2, 1, 2, 2}; __shared__ real b[6][6]; if (threadIdx.x < 36) { int i = threadIdx.x/6; int j = threadIdx.x-6*i; b[i][j] = a[index1[i]][index1[j]]*a[index2[i]][index2[j]]; if (index1[j] != index2[j]) b[i][j] += (i < 3 ? b[i][j] : a[index1[i]][index2[j]]*a[index2[i]][index1[j]]); } __syncthreads(); // Transform the potential. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { cphi[10*i] = fphi[20*i]; cphi[10*i+1] = a[0][0]*fphi[20*i+1] + a[0][1]*fphi[20*i+2] + a[0][2]*fphi[20*i+3]; cphi[10*i+2] = a[1][0]*fphi[20*i+1] + a[1][1]*fphi[20*i+2] + a[1][2]*fphi[20*i+3]; cphi[10*i+3] = a[2][0]*fphi[20*i+1] + a[2][1]*fphi[20*i+2] + a[2][2]*fphi[20*i+3]; for (int j = 0; j < 6; j++) { cphi[10*i+4+j] = 0; for (int k = 0; k < 6; k++) cphi[10*i+4+j] += b[j][k]*fphi[20*i+4+k]; } } } extern "C" __global__ void gridSpreadFixedMultipoles(const real4* __restrict__ posq, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, real2* __restrict__ pmeGrid, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = igrid1+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real4 t = theta1[ix]; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = igrid2+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real4 u = theta2[iy]; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = igrid3+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real4 v = theta3[iz]; real atomCharge = pos.w; real atomDipoleX = fracDipole[m*3]; real atomDipoleY = fracDipole[m*3+1]; real atomDipoleZ = fracDipole[m*3+2]; real atomQuadrupoleXX = fracQuadrupole[m*6]; real atomQuadrupoleXY = fracQuadrupole[m*6+1]; real atomQuadrupoleXZ = fracQuadrupole[m*6+2]; real atomQuadrupoleYY = fracQuadrupole[m*6+3]; real atomQuadrupoleYZ = fracQuadrupole[m*6+4]; real atomQuadrupoleZZ = fracQuadrupole[m*6+5]; real term0 = atomCharge*u.x*v.x + atomDipoleY*u.y*v.x + atomDipoleZ*u.x*v.y + atomQuadrupoleYY*u.z*v.x + atomQuadrupoleZZ*u.x*v.z + atomQuadrupoleYZ*u.y*v.y; real term1 = atomDipoleX*u.x*v.x + atomQuadrupoleXY*u.y*v.x + atomQuadrupoleXZ*u.x*v.y; real term2 = atomQuadrupoleXX * u.x * v.x; real add = term0*t.x + term1*t.y + term2*t.z; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) pmeGrid; atomicAdd(&ulonglong_p[2*index], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&pmeGrid[index].x, add); #endif } } } } } extern "C" __global__ void gridSpreadInducedDipoles(const real4* __restrict__ posq, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar, real2* __restrict__ pmeGrid, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; __shared__ real cartToFrac[3][3]; if (threadIdx.x == 0) { cartToFrac[0][0] = GRID_SIZE_X*recipBoxVecX.x; cartToFrac[0][1] = GRID_SIZE_X*recipBoxVecY.x; cartToFrac[0][2] = GRID_SIZE_X*recipBoxVecZ.x; cartToFrac[1][0] = GRID_SIZE_Y*recipBoxVecX.y; cartToFrac[1][1] = GRID_SIZE_Y*recipBoxVecY.y; cartToFrac[1][2] = GRID_SIZE_Y*recipBoxVecZ.y; cartToFrac[2][0] = GRID_SIZE_Z*recipBoxVecX.z; cartToFrac[2][1] = GRID_SIZE_Z*recipBoxVecY.z; cartToFrac[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = igrid1+ix; xbase -= (xbase >= GRID_SIZE_X ? 
GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real4 t = theta1[ix]; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = igrid2+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real4 u = theta2[iy]; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = igrid3+iz; zindex -= (zindex >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); int index = ybase + zindex; real4 v = theta3[iz]; real3 cinducedDipole = make_real3(inducedDipole[m*3], inducedDipole[m*3+1], inducedDipole[m*3+2]); real3 cinducedDipolePolar = make_real3(inducedDipolePolar[m*3], inducedDipolePolar[m*3+1], inducedDipolePolar[m*3+2]); real3 finducedDipole = make_real3(cinducedDipole.x*cartToFrac[0][0] + cinducedDipole.y*cartToFrac[0][1] + cinducedDipole.z*cartToFrac[0][2], cinducedDipole.x*cartToFrac[1][0] + cinducedDipole.y*cartToFrac[1][1] + cinducedDipole.z*cartToFrac[1][2], cinducedDipole.x*cartToFrac[2][0] + cinducedDipole.y*cartToFrac[2][1] + cinducedDipole.z*cartToFrac[2][2]); real3 finducedDipolePolar = make_real3(cinducedDipolePolar.x*cartToFrac[0][0] + cinducedDipolePolar.y*cartToFrac[0][1] + cinducedDipolePolar.z*cartToFrac[0][2], cinducedDipolePolar.x*cartToFrac[1][0] + cinducedDipolePolar.y*cartToFrac[1][1] + cinducedDipolePolar.z*cartToFrac[1][2], cinducedDipolePolar.x*cartToFrac[2][0] + cinducedDipolePolar.y*cartToFrac[2][1] + cinducedDipolePolar.z*cartToFrac[2][2]); real term01 = finducedDipole.y*u.y*v.x + finducedDipole.z*u.x*v.y; real term11 = finducedDipole.x*u.x*v.x; real term02 = finducedDipolePolar.y*u.y*v.x + finducedDipolePolar.z*u.x*v.y; real term12 = finducedDipolePolar.x*u.x*v.x; real add1 = term01*t.x + term11*t.y; real add2 = term02*t.x + term12*t.y; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) pmeGrid; atomicAdd(&ulonglong_p[2*index], static_cast<unsigned long long>((long long) (add1*0x100000000))); atomicAdd(&ulonglong_p[2*index+1], static_cast<unsigned long long>((long long) (add2*0x100000000))); #else atomicAdd(&pmeGrid[index].x, add1); atomicAdd(&pmeGrid[index].y, add2); #endif } } } } } /** * In double precision, we have to use fixed point to accumulate the grid values, so convert them to floating point. */ extern "C" __global__ void finishSpreadCharge(long long* __restrict__ pmeGrid) { real* floatGrid = (real*) pmeGrid; const unsigned int gridSize = 2*GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = 1/(real) 0x100000000; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) floatGrid[index] = scale*pmeGrid[index]; } extern "C" __global__ void reciprocalConvolution(real2* __restrict__ pmeGrid, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real expFactor = M_PI*M_PI/(EWALD_ALPHA*EWALD_ALPHA); real scaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { int kx = index/(GRID_SIZE_Y*GRID_SIZE_Z); int remainder = index-kx*GRID_SIZE_Y*GRID_SIZE_Z; int ky = remainder/GRID_SIZE_Z; int kz = remainder-ky*GRID_SIZE_Z; if (kx == 0 && ky == 0 && kz == 0) { pmeGrid[index] = make_real2(0, 0); continue; } int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? 
ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real denom = m2*bx*by*bz; real eterm = scaleFactor*EXP(-expFactor*m2)/denom; pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } } extern "C" __global__ void computeFixedPotentialFromGrid(const real2* __restrict__ pmeGrid, real* __restrict__ phi, long long* __restrict__ fieldBuffers, long long* __restrict__ fieldPolarBuffers, const real4* __restrict__ posq, const real* __restrict__ labFrameDipole, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, int2* __restrict__ pmeAtomGridIndex) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Compute the potential from this grid point. real tuv000 = 0; real tuv001 = 0; real tuv010 = 0; real tuv100 = 0; real tuv200 = 0; real tuv020 = 0; real tuv002 = 0; real tuv110 = 0; real tuv101 = 0; real tuv011 = 0; real tuv300 = 0; real tuv030 = 0; real tuv003 = 0; real tuv210 = 0; real tuv201 = 0; real tuv120 = 0; real tuv021 = 0; real tuv102 = 0; real tuv012 = 0; real tuv111 = 0; for (int iz = 0; iz < PME_ORDER; iz++) { int k = igrid3+iz-(igrid3+iz >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); real4 v = theta3[iz]; real tu00 = 0; real tu10 = 0; real tu01 = 0; real tu20 = 0; real tu11 = 0; real tu02 = 0; real tu30 = 0; real tu21 = 0; real tu12 = 0; real tu03 = 0; for (int iy = 0; iy < PME_ORDER; iy++) { int j = igrid2+iy-(igrid2+iy >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); real4 u = theta2[iy]; real4 t = make_real4(0, 0, 0, 0); for (int ix = 0; ix < PME_ORDER; ix++) { int i = igrid1+ix-(igrid1+ix >= GRID_SIZE_X ? GRID_SIZE_X : 0); int gridIndex = i*GRID_SIZE_Y*GRID_SIZE_Z + j*GRID_SIZE_Z + k; real tq = pmeGrid[gridIndex].x; real4 tadd = theta1[ix]; t.x += tq*tadd.x; t.y += tq*tadd.y; t.z += tq*tadd.z; t.w += tq*tadd.w; } tu00 += t.x*u.x; tu10 += t.y*u.x; tu01 += t.x*u.y; tu20 += t.z*u.x; tu11 += t.y*u.y; tu02 += t.x*u.z; tu30 += t.w*u.x; tu21 += t.z*u.y; tu12 += t.y*u.z; tu03 += t.x*u.w; } tuv000 += tu00*v.x; tuv100 += tu10*v.x; tuv010 += tu01*v.x; tuv001 += tu00*v.y; tuv200 += tu20*v.x; tuv020 += tu02*v.x; tuv002 += tu00*v.z; tuv110 += tu11*v.x; tuv101 += tu10*v.y; tuv011 += tu01*v.y; tuv300 += tu30*v.x; tuv030 += tu03*v.x; tuv003 += tu00*v.w; tuv210 += tu21*v.x; tuv201 += tu20*v.y; tuv120 += tu12*v.x; tuv021 += tu02*v.y; tuv102 += tu10*v.z; tuv012 += tu01*v.z; tuv111 += tu11*v.y; } phi[20*m] = tuv000; phi[20*m+1] = tuv100; phi[20*m+2] = tuv010; phi[20*m+3] = tuv001; phi[20*m+4] = tuv200; phi[20*m+5] = tuv020; phi[20*m+6] = tuv002; phi[20*m+7] = tuv110; phi[20*m+8] = tuv101; phi[20*m+9] = tuv011; phi[20*m+10] = tuv300; phi[20*m+11] = tuv030; phi[20*m+12] = tuv003; phi[20*m+13] = tuv210; phi[20*m+14] = tuv201; phi[20*m+15] = tuv120; phi[20*m+16] = tuv021; phi[20*m+17] = tuv102; phi[20*m+18] = tuv012; phi[20*m+19] = tuv111; real dipoleScale = (4/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI; long long fieldx = (long long) ((dipoleScale*labFrameDipole[m*3]-tuv100*fracToCart[0][0]-tuv010*fracToCart[0][1]-tuv001*fracToCart[0][2])*0x100000000); fieldBuffers[m] = fieldx; fieldPolarBuffers[m] = fieldx; long long fieldy = (long long) ((dipoleScale*labFrameDipole[m*3+1]-tuv100*fracToCart[1][0]-tuv010*fracToCart[1][1]-tuv001*fracToCart[1][2])*0x100000000); fieldBuffers[m+PADDED_NUM_ATOMS] = fieldy; fieldPolarBuffers[m+PADDED_NUM_ATOMS] = fieldy; long long fieldz = (long long) ((dipoleScale*labFrameDipole[m*3+2]-tuv100*fracToCart[2][0]-tuv010*fracToCart[2][1]-tuv001*fracToCart[2][2])*0x100000000); fieldBuffers[m+2*PADDED_NUM_ATOMS] = fieldz; fieldPolarBuffers[m+2*PADDED_NUM_ATOMS] = fieldz; } } extern "C" __global__ void computeInducedPotentialFromGrid(const real2* __restrict__ pmeGrid, real* __restrict__ phid, real* __restrict__ phip, real* __restrict__ phidp, const real4* __restrict__ posq, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, int2* __restrict__ pmeAtomGridIndex) { real array[PME_ORDER*PME_ORDER]; real4 theta1[PME_ORDER]; real4 theta2[PME_ORDER]; real4 theta3[PME_ORDER]; // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int m = pmeAtomGridIndex[i].x; real4 pos = posq[m]; pos -= periodicBoxVecZ*floor(pos.z*recipBoxVecZ.z+0.5f); pos -= periodicBoxVecY*floor(pos.y*recipBoxVecY.z+0.5f); pos -= periodicBoxVecX*floor(pos.x*recipBoxVecX.z+0.5f); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real w = pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x; real fr = GRID_SIZE_X*(w-(int)(w+0.5f)+0.5f); int ifr = (int) floor(fr); w = fr - ifr; int igrid1 = ifr-PME_ORDER+1; computeBSplinePoint(theta1, w, array); w = pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y; fr = GRID_SIZE_Y*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid2 = ifr-PME_ORDER+1; computeBSplinePoint(theta2, w, array); w = pos.z*recipBoxVecZ.z; fr = GRID_SIZE_Z*(w-(int)(w+0.5f)+0.5f); ifr = (int) floor(fr); w = fr - ifr; int igrid3 = ifr-PME_ORDER+1; computeBSplinePoint(theta3, w, array); igrid1 += (igrid1 < 0 ? GRID_SIZE_X : 0); igrid2 += (igrid2 < 0 ? GRID_SIZE_Y : 0); igrid3 += (igrid3 < 0 ? GRID_SIZE_Z : 0); // Compute the potential from this grid point. real tuv100_1 = 0; real tuv010_1 = 0; real tuv001_1 = 0; real tuv200_1 = 0; real tuv020_1 = 0; real tuv002_1 = 0; real tuv110_1 = 0; real tuv101_1 = 0; real tuv011_1 = 0; real tuv100_2 = 0; real tuv010_2 = 0; real tuv001_2 = 0; real tuv200_2 = 0; real tuv020_2 = 0; real tuv002_2 = 0; real tuv110_2 = 0; real tuv101_2 = 0; real tuv011_2 = 0; real tuv000 = 0; real tuv001 = 0; real tuv010 = 0; real tuv100 = 0; real tuv200 = 0; real tuv020 = 0; real tuv002 = 0; real tuv110 = 0; real tuv101 = 0; real tuv011 = 0; real tuv300 = 0; real tuv030 = 0; real tuv003 = 0; real tuv210 = 0; real tuv201 = 0; real tuv120 = 0; real tuv021 = 0; real tuv102 = 0; real tuv012 = 0; real tuv111 = 0; for (int iz = 0; iz < PME_ORDER; iz++) { int k = igrid3+iz-(igrid3+iz >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); real4 v = theta3[iz]; real tu00_1 = 0; real tu01_1 = 0; real tu10_1 = 0; real tu20_1 = 0; real tu11_1 = 0; real tu02_1 = 0; real tu00_2 = 0; real tu01_2 = 0; real tu10_2 = 0; real tu20_2 = 0; real tu11_2 = 0; real tu02_2 = 0; real tu00 = 0; real tu10 = 0; real tu01 = 0; real tu20 = 0; real tu11 = 0; real tu02 = 0; real tu30 = 0; real tu21 = 0; real tu12 = 0; real tu03 = 0; for (int iy = 0; iy < PME_ORDER; iy++) { int j = igrid2+iy-(igrid2+iy >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); real4 u = theta2[iy]; real t0_1 = 0; real t1_1 = 0; real t2_1 = 0; real t0_2 = 0; real t1_2 = 0; real t2_2 = 0; real t3 = 0; for (int ix = 0; ix < PME_ORDER; ix++) { int i = igrid1+ix-(igrid1+ix >= GRID_SIZE_X ? 
GRID_SIZE_X : 0); int gridIndex = i*GRID_SIZE_Y*GRID_SIZE_Z + j*GRID_SIZE_Z + k; real2 tq = pmeGrid[gridIndex]; real4 tadd = theta1[ix]; t0_1 += tq.x*tadd.x; t1_1 += tq.x*tadd.y; t2_1 += tq.x*tadd.z; t0_2 += tq.y*tadd.x; t1_2 += tq.y*tadd.y; t2_2 += tq.y*tadd.z; t3 += (tq.x+tq.y)*tadd.w; } tu00_1 += t0_1*u.x; tu10_1 += t1_1*u.x; tu01_1 += t0_1*u.y; tu20_1 += t2_1*u.x; tu11_1 += t1_1*u.y; tu02_1 += t0_1*u.z; tu00_2 += t0_2*u.x; tu10_2 += t1_2*u.x; tu01_2 += t0_2*u.y; tu20_2 += t2_2*u.x; tu11_2 += t1_2*u.y; tu02_2 += t0_2*u.z; real t0 = t0_1 + t0_2; real t1 = t1_1 + t1_2; real t2 = t2_1 + t2_2; tu00 += t0*u.x; tu10 += t1*u.x; tu01 += t0*u.y; tu20 += t2*u.x; tu11 += t1*u.y; tu02 += t0*u.z; tu30 += t3*u.x; tu21 += t2*u.y; tu12 += t1*u.z; tu03 += t0*u.w; } tuv100_1 += tu10_1*v.x; tuv010_1 += tu01_1*v.x; tuv001_1 += tu00_1*v.y; tuv200_1 += tu20_1*v.x; tuv020_1 += tu02_1*v.x; tuv002_1 += tu00_1*v.z; tuv110_1 += tu11_1*v.x; tuv101_1 += tu10_1*v.y; tuv011_1 += tu01_1*v.y; tuv100_2 += tu10_2*v.x; tuv010_2 += tu01_2*v.x; tuv001_2 += tu00_2*v.y; tuv200_2 += tu20_2*v.x; tuv020_2 += tu02_2*v.x; tuv002_2 += tu00_2*v.z; tuv110_2 += tu11_2*v.x; tuv101_2 += tu10_2*v.y; tuv011_2 += tu01_2*v.y; tuv000 += tu00*v.x; tuv100 += tu10*v.x; tuv010 += tu01*v.x; tuv001 += tu00*v.y; tuv200 += tu20*v.x; tuv020 += tu02*v.x; tuv002 += tu00*v.z; tuv110 += tu11*v.x; tuv101 += tu10*v.y; tuv011 += tu01*v.y; tuv300 += tu30*v.x; tuv030 += tu03*v.x; tuv003 += tu00*v.w; tuv210 += tu21*v.x; tuv201 += tu20*v.y; tuv120 += tu12*v.x; tuv021 += tu02*v.y; tuv102 += tu10*v.z; tuv012 += tu01*v.z; tuv111 += tu11*v.y; } phid[10*m] = 0; phid[10*m+1] = tuv100_1; phid[10*m+2] = tuv010_1; phid[10*m+3] = tuv001_1; phid[10*m+4] = tuv200_1; phid[10*m+5] = tuv020_1; phid[10*m+6] = tuv002_1; phid[10*m+7] = tuv110_1; phid[10*m+8] = tuv101_1; phid[10*m+9] = tuv011_1; phip[10*m] = 0; phip[10*m+1] = tuv100_2; phip[10*m+2] = tuv010_2; phip[10*m+3] = tuv001_2; phip[10*m+4] = tuv200_2; phip[10*m+5] = tuv020_2; phip[10*m+6] = tuv002_2; phip[10*m+7] = tuv110_2; phip[10*m+8] = tuv101_2; phip[10*m+9] = tuv011_2; phidp[20*m] = tuv000; phidp[20*m+1] = tuv100; phidp[20*m+2] = tuv010; phidp[20*m+3] = tuv001; phidp[20*m+4] = tuv200; phidp[20*m+5] = tuv020; phidp[20*m+6] = tuv002; phidp[20*m+7] = tuv110; phidp[20*m+8] = tuv101; phidp[20*m+9] = tuv011; phidp[20*m+10] = tuv300; phidp[20*m+11] = tuv030; phidp[20*m+12] = tuv003; phidp[20*m+13] = tuv210; phidp[20*m+14] = tuv201; phidp[20*m+15] = tuv120; phidp[20*m+16] = tuv021; phidp[20*m+17] = tuv102; phidp[20*m+18] = tuv012; phidp[20*m+19] = tuv111; } } extern "C" __global__ void computeFixedMultipoleForceAndEnergy(real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, long long* __restrict__ torqueBuffers, real* __restrict__ energyBuffer, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, const real* __restrict__ phi_global, const real* __restrict__ cphi_global, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real multipole[10]; const int deriv1[] = {1, 4, 7, 8, 10, 15, 17, 13, 14, 19}; const int deriv2[] = {2, 7, 5, 9, 13, 11, 18, 15, 19, 16}; const int deriv3[] = {3, 8, 9, 6, 14, 16, 12, 19, 17, 18}; real energy = 0; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; 
fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { // Compute the torque. multipole[0] = posq[i].w; multipole[1] = labFrameDipole[i*3]; multipole[2] = labFrameDipole[i*3+1]; multipole[3] = labFrameDipole[i*3+2]; multipole[4] = labFrameQuadrupole[i*5]; multipole[5] = labFrameQuadrupole[i*5+3]; multipole[6] = -(multipole[4]+multipole[5]); multipole[7] = 2*labFrameQuadrupole[i*5+1]; multipole[8] = 2*labFrameQuadrupole[i*5+2]; multipole[9] = 2*labFrameQuadrupole[i*5+4]; const real* cphi = &cphi_global[10*i]; torqueBuffers[i] = (long long) (EPSILON_FACTOR*(multipole[3]*cphi[2] - multipole[2]*cphi[3] + 2*(multipole[6]-multipole[5])*cphi[9] + multipole[8]*cphi[7] + multipole[9]*cphi[5] - multipole[7]*cphi[8] - multipole[9]*cphi[6])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS] = (long long) (EPSILON_FACTOR*(multipole[1]*cphi[3] - multipole[3]*cphi[1] + 2*(multipole[4]-multipole[6])*cphi[8] + multipole[7]*cphi[9] + multipole[8]*cphi[6] - multipole[8]*cphi[4] - multipole[9]*cphi[7])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS*2] = (long long) (EPSILON_FACTOR*(multipole[2]*cphi[1] - multipole[1]*cphi[2] + 2*(multipole[5]-multipole[4])*cphi[7] + multipole[7]*cphi[4] + multipole[9]*cphi[8] - multipole[7]*cphi[5] - multipole[8]*cphi[9])*0x100000000); // Compute the force and energy. multipole[1] = fracDipole[i*3]; multipole[2] = fracDipole[i*3+1]; multipole[3] = fracDipole[i*3+2]; multipole[4] = fracQuadrupole[i*6]; multipole[5] = fracQuadrupole[i*6+3]; multipole[6] = fracQuadrupole[i*6+5]; multipole[7] = fracQuadrupole[i*6+1]; multipole[8] = fracQuadrupole[i*6+2]; multipole[9] = fracQuadrupole[i*6+4]; const real* phi = &phi_global[20*i]; real4 f = make_real4(0, 0, 0, 0); for (int k = 0; k < 10; k++) { energy += multipole[k]*phi[k]; f.x += multipole[k]*phi[deriv1[k]]; f.y += multipole[k]*phi[deriv2[k]]; f.z += multipole[k]*phi[deriv3[k]]; } f = make_real4(EPSILON_FACTOR*(f.x*fracToCart[0][0] + f.y*fracToCart[0][1] + f.z*fracToCart[0][2]), EPSILON_FACTOR*(f.x*fracToCart[1][0] + f.y*fracToCart[1][1] + f.z*fracToCart[1][2]), EPSILON_FACTOR*(f.x*fracToCart[2][0] + f.y*fracToCart[2][1] + f.z*fracToCart[2][2]), 0); forceBuffers[i] -= static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS] -= static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS*2] -= static_cast<unsigned long long>((long long) (f.z*0x100000000)); } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*EPSILON_FACTOR*energy; } extern "C" __global__ void computeInducedDipoleForceAndEnergy(real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, long long* __restrict__ torqueBuffers, real* __restrict__ energyBuffer, const real* __restrict__ labFrameDipole, const real* __restrict__ labFrameQuadrupole, const real* __restrict__ fracDipole, const real* __restrict__ fracQuadrupole, const real* __restrict__ inducedDipole_global, const real* __restrict__ inducedDipolePolar_global, const real* __restrict__ phi_global, const real* __restrict__ phid_global, const real* __restrict__ phip_global, const real* __restrict__ phidp_global, const real* __restrict__ cphi_global, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { real multipole[10]; real 
cinducedDipole[3], inducedDipole[3]; real cinducedDipolePolar[3], inducedDipolePolar[3]; const int deriv1[] = {1, 4, 7, 8, 10, 15, 17, 13, 14, 19}; const int deriv2[] = {2, 7, 5, 9, 13, 11, 18, 15, 19, 16}; const int deriv3[] = {3, 8, 9, 6, 14, 16, 12, 19, 17, 18}; real energy = 0; __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { // Compute the torque. multipole[0] = posq[i].w; multipole[1] = labFrameDipole[i*3]; multipole[2] = labFrameDipole[i*3+1]; multipole[3] = labFrameDipole[i*3+2]; multipole[4] = labFrameQuadrupole[i*5]; multipole[5] = labFrameQuadrupole[i*5+3]; multipole[6] = -(multipole[4]+multipole[5]); multipole[7] = 2*labFrameQuadrupole[i*5+1]; multipole[8] = 2*labFrameQuadrupole[i*5+2]; multipole[9] = 2*labFrameQuadrupole[i*5+4]; const real* cphi = &cphi_global[10*i]; torqueBuffers[i] += (long long) (0.5f*EPSILON_FACTOR*(multipole[3]*cphi[2] - multipole[2]*cphi[3] + 2*(multipole[6]-multipole[5])*cphi[9] + multipole[8]*cphi[7] + multipole[9]*cphi[5] - multipole[7]*cphi[8] - multipole[9]*cphi[6])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS] += (long long) (0.5f*EPSILON_FACTOR*(multipole[1]*cphi[3] - multipole[3]*cphi[1] + 2*(multipole[4]-multipole[6])*cphi[8] + multipole[7]*cphi[9] + multipole[8]*cphi[6] - multipole[8]*cphi[4] - multipole[9]*cphi[7])*0x100000000); torqueBuffers[i+PADDED_NUM_ATOMS*2] += (long long) (0.5f*EPSILON_FACTOR*(multipole[2]*cphi[1] - multipole[1]*cphi[2] + 2*(multipole[5]-multipole[4])*cphi[7] + multipole[7]*cphi[4] + multipole[9]*cphi[8] - multipole[7]*cphi[5] - multipole[8]*cphi[9])*0x100000000); // Compute the force and energy. multipole[1] = fracDipole[i*3]; multipole[2] = fracDipole[i*3+1]; multipole[3] = fracDipole[i*3+2]; multipole[4] = fracQuadrupole[i*6]; multipole[5] = fracQuadrupole[i*6+3]; multipole[6] = fracQuadrupole[i*6+5]; multipole[7] = fracQuadrupole[i*6+1]; multipole[8] = fracQuadrupole[i*6+2]; multipole[9] = fracQuadrupole[i*6+4]; cinducedDipole[0] = inducedDipole_global[i*3]; cinducedDipole[1] = inducedDipole_global[i*3+1]; cinducedDipole[2] = inducedDipole_global[i*3+2]; cinducedDipolePolar[0] = inducedDipolePolar_global[i*3]; cinducedDipolePolar[1] = inducedDipolePolar_global[i*3+1]; cinducedDipolePolar[2] = inducedDipolePolar_global[i*3+2]; // Multiply the dipoles by cartToFrac, which is just the transpose of fracToCart. 
inducedDipole[0] = cinducedDipole[0]*fracToCart[0][0] + cinducedDipole[1]*fracToCart[1][0] + cinducedDipole[2]*fracToCart[2][0]; inducedDipole[1] = cinducedDipole[0]*fracToCart[0][1] + cinducedDipole[1]*fracToCart[1][1] + cinducedDipole[2]*fracToCart[2][1]; inducedDipole[2] = cinducedDipole[0]*fracToCart[0][2] + cinducedDipole[1]*fracToCart[1][2] + cinducedDipole[2]*fracToCart[2][2]; inducedDipolePolar[0] = cinducedDipolePolar[0]*fracToCart[0][0] + cinducedDipolePolar[1]*fracToCart[1][0] + cinducedDipolePolar[2]*fracToCart[2][0]; inducedDipolePolar[1] = cinducedDipolePolar[0]*fracToCart[0][1] + cinducedDipolePolar[1]*fracToCart[1][1] + cinducedDipolePolar[2]*fracToCart[2][1]; inducedDipolePolar[2] = cinducedDipolePolar[0]*fracToCart[0][2] + cinducedDipolePolar[1]*fracToCart[1][2] + cinducedDipolePolar[2]*fracToCart[2][2]; const real* phi = &phi_global[20*i]; const real* phip = &phip_global[10*i]; const real* phid = &phid_global[10*i]; real4 f = make_real4(0, 0, 0, 0); energy += inducedDipole[0]*phi[1]; energy += inducedDipole[1]*phi[2]; energy += inducedDipole[2]*phi[3]; for (int k = 0; k < 3; k++) { int j1 = deriv1[k+1]; int j2 = deriv2[k+1]; int j3 = deriv3[k+1]; f.x += (inducedDipole[k]+inducedDipolePolar[k])*phi[j1]; f.y += (inducedDipole[k]+inducedDipolePolar[k])*phi[j2]; f.z += (inducedDipole[k]+inducedDipolePolar[k])*phi[j3]; #ifndef DIRECT_POLARIZATION f.x += (inducedDipole[k]*phip[j1] + inducedDipolePolar[k]*phid[j1]); f.y += (inducedDipole[k]*phip[j2] + inducedDipolePolar[k]*phid[j2]); f.z += (inducedDipole[k]*phip[j3] + inducedDipolePolar[k]*phid[j3]); #endif } const real* phidp = &phidp_global[20*i]; for (int k = 0; k < 10; k++) { f.x += multipole[k]*phidp[deriv1[k]]; f.y += multipole[k]*phidp[deriv2[k]]; f.z += multipole[k]*phidp[deriv3[k]]; } f = make_real4(0.5f*EPSILON_FACTOR*(f.x*fracToCart[0][0] + f.y*fracToCart[0][1] + f.z*fracToCart[0][2]), 0.5f*EPSILON_FACTOR*(f.x*fracToCart[1][0] + f.y*fracToCart[1][1] + f.z*fracToCart[1][2]), 0.5f*EPSILON_FACTOR*(f.x*fracToCart[2][0] + f.y*fracToCart[2][1] + f.z*fracToCart[2][2]), 0); forceBuffers[i] -= static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS] -= static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[i+PADDED_NUM_ATOMS*2] -= static_cast<unsigned long long>((long long) (f.z*0x100000000)); } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*EPSILON_FACTOR*energy; } extern "C" __global__ void recordInducedFieldDipoles(const real* __restrict__ phid, real* const __restrict__ phip, long long* __restrict__ inducedField, long long* __restrict__ inducedFieldPolar, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { __shared__ real fracToCart[3][3]; if (threadIdx.x == 0) { fracToCart[0][0] = GRID_SIZE_X*recipBoxVecX.x; fracToCart[1][0] = GRID_SIZE_X*recipBoxVecY.x; fracToCart[2][0] = GRID_SIZE_X*recipBoxVecZ.x; fracToCart[0][1] = GRID_SIZE_Y*recipBoxVecX.y; fracToCart[1][1] = GRID_SIZE_Y*recipBoxVecY.y; fracToCart[2][1] = GRID_SIZE_Y*recipBoxVecZ.y; fracToCart[0][2] = GRID_SIZE_Z*recipBoxVecX.z; fracToCart[1][2] = GRID_SIZE_Z*recipBoxVecY.z; fracToCart[2][2] = GRID_SIZE_Z*recipBoxVecZ.z; } __syncthreads(); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { inducedField[i] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[0][0] + phid[10*i+2]*fracToCart[0][1] + phid[10*i+3]*fracToCart[0][2])); inducedField[i+PADDED_NUM_ATOMS] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[1][0] + 
phid[10*i+2]*fracToCart[1][1] + phid[10*i+3]*fracToCart[1][2])); inducedField[i+PADDED_NUM_ATOMS*2] -= (long long) (0x100000000*(phid[10*i+1]*fracToCart[2][0] + phid[10*i+2]*fracToCart[2][1] + phid[10*i+3]*fracToCart[2][2])); inducedFieldPolar[i] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[0][0] + phip[10*i+2]*fracToCart[0][1] + phip[10*i+3]*fracToCart[0][2])); inducedFieldPolar[i+PADDED_NUM_ATOMS] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[1][0] + phip[10*i+2]*fracToCart[1][1] + phip[10*i+3]*fracToCart[1][2])); inducedFieldPolar[i+PADDED_NUM_ATOMS*2] -= (long long) (0x100000000*(phip[10*i+1]*fracToCart[2][0] + phip[10*i+2]*fracToCart[2][1] + phip[10*i+3]*fracToCart[2][2])); } }
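Editorial note: the multipole PME kernels above accumulate grid charge, fields, torques, and forces through 64-bit fixed-point atomics (values scaled by 0x100000000 and added as integers), which keeps the summation order-independent and therefore deterministic. A minimal sketch of that pattern follows, assuming illustrative kernel and buffer names that do not appear in the file above.

// Minimal sketch of the fixed-point accumulation used above (illustrative names only).
// Contributions are scaled by 2^32, added with integer atomics, and rescaled afterwards.
// Negative contributions wrap correctly through two's complement when added as unsigned.
__global__ void accumulateFixedPoint(const float* __restrict__ contributions, unsigned long long* __restrict__ accumulator, int n) {
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x)
        atomicAdd(accumulator, static_cast<unsigned long long>((long long) (contributions[i]*0x100000000)));
}

__global__ void readFixedPoint(const long long* __restrict__ accumulator, float* __restrict__ result) {
    const float scale = 1/(float) 0x100000000;
    *result = scale*(*accumulator);
}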
8f1e684a962e4d88cdc488ef9d9d30b9bf275c5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_fp16.h> #include <stdio.h> #include <algorithm> #include <cmath> #include "adaptive_pool.h" #include "amir_cuda_util/cuda_util.h" namespace amirstan { namespace plugin { #define START_IND(a, b, c) (int)::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)::ceil((float)((a + 1) * c) / b) using namespace amirstan::cuda; using amirstan::cuda::TensorSize; using amirstan::cuda::TensorStride; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> struct idleOp { inline __device__ T operator()(const T &x, size_t reduce_count = 0) { return x; } }; template <typename T> struct maxOp { inline __device__ T operator()(const T &x, const T &y) { return x > y ? x : y; } }; template <typename T> struct sumOp { inline __device__ T operator()(const T &x, const T &y) { return x + y; } }; template <typename T> struct divCountOp { inline __device__ T operator()(const T &x, size_t reduce_count = 0) { return x / reduce_count; } }; template <typename T, typename PreReduceOp, typename ReduceOp, typename PostReduceOp> __global__ void adaptive_pool_kernel( T *output, const T *input, TensorSize input_size, TensorStride input_stride, TensorSize output_size, TensorStride output_stride, int nb_dims, int nb_reduce_dims, int cell_per_block, int thread_per_cell, PreReduceOp pre_reduce_op, ReduceOp reduce_op, PostReduceOp post_reduce_op, size_t N) { const int REDUCE_MAX_COUNT = 10; float reduce_size[REDUCE_MAX_COUNT]; float reduce_stride[REDUCE_MAX_COUNT]; extern __shared__ T smemChar[]; for (int cell_id = blockIdx.x * cell_per_block + threadIdx.x / thread_per_cell; cell_id < (N); cell_id += cell_per_block * gridDim.x) { // exit if thread belong to no cell if (threadIdx.x >= cell_per_block * thread_per_cell) { break; } // how many element in cell and the start input offset of cell size_t cell_count = 1; size_t cell_input_offset = 0; int tmp_cell_id = cell_id; for (int i = 0; i < nb_dims; ++i) { const int idx = tmp_cell_id / output_stride.size[i]; tmp_cell_id = tmp_cell_id % output_stride.size[i]; const int input_idx_start = START_IND(idx, output_size.size[i], input_size.size[i]); const int input_idx_end = END_IND(idx, output_size.size[i], input_size.size[i]); cell_count *= (input_idx_end - input_idx_start); reduce_size[i] = (input_idx_end - input_idx_start); cell_input_offset += input_idx_start * input_stride.size[i]; } cell_count = max(cell_count, size_t(1)); reduce_stride[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { reduce_stride[i] = reduce_stride[i + 1] * reduce_size[i + 1]; } // put data in share memory const int cell_thread_id = threadIdx.x % thread_per_cell; for (int cell_data_id = cell_thread_id; cell_data_id < cell_count; cell_data_id += thread_per_cell) { int tmp_cell_data_id = cell_data_id; size_t input_idx = 0; for (int i = nb_dims - nb_reduce_dims; i < nb_dims; ++i) { const int idx = tmp_cell_data_id / reduce_stride[i]; tmp_cell_data_id = tmp_cell_data_id % int(reduce_stride[i]); input_idx += idx * input_stride.size[i]; } if (cell_data_id == cell_thread_id) { smemChar[threadIdx.x] = pre_reduce_op(input[cell_input_offset + input_idx], cell_count); } else { smemChar[threadIdx.x] = reduce_op( smemChar[threadIdx.x], pre_reduce_op(input[cell_input_offset + input_idx], cell_count)); } } __syncthreads(); ///// reduce smemChar for (unsigned int reduce_step = thread_per_cell / 2; reduce_step > 0; reduce_step >>= 1) { if 
(cell_thread_id < reduce_step && cell_thread_id < cell_count) { smemChar[threadIdx.x] = reduce_op(smemChar[threadIdx.x], smemChar[threadIdx.x + reduce_step]); } __syncthreads(); } if (cell_thread_id == 0) { // for(int i=1;i<min(size_t(thread_per_cell),size_t(cell_count));++i){ // smemChar[threadIdx.x]=reduce_op(smemChar[threadIdx.x], // smemChar[threadIdx.x+i]); // } output[cell_id] = post_reduce_op(smemChar[threadIdx.x], cell_count); } } } template <typename T> void adaptive_pool(T *output, const T *input, int *input_dims, int *output_dims, int nb_dims, int nb_reduce_dims, PoolType pool_type, hipStream_t stream) { TensorSize ts_input_size; TensorStride input_stride; memcpy(&(ts_input_size.size[0]), input_dims, sizeof(int) * nb_dims); input_stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { input_stride.size[i] = input_stride.size[i + 1] * ts_input_size.size[i + 1]; } TensorSize ts_output_size; TensorStride output_stride; memcpy(&ts_output_size.size[0], output_dims, sizeof(int) * nb_dims); output_stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { output_stride.size[i] = output_stride.size[i + 1] * ts_output_size.size[i + 1]; } size_t reduce_cell_count = 1; for (int i = nb_dims - nb_reduce_dims; i < nb_dims; ++i) { size_t reduce_size = ceilDiv<size_t>(ts_input_size.size[i], ts_output_size.size[i]); reduce_cell_count *= max(size_t(1), reduce_size); } size_t num_cell = output_stride.size[0] * ts_output_size.size[0]; int thread_per_cell = 1; while ((thread_per_cell << 1) < reduce_cell_count) { thread_per_cell = thread_per_cell << 1; } thread_per_cell = ::min(CUDA_NUM_THREADS, thread_per_cell); int cell_per_block = ::max(1, CUDA_NUM_THREADS / thread_per_cell); int share_size = CUDA_NUM_THREADS * sizeof(T); int num_block = ceilDiv<int>(num_cell, cell_per_block); num_block = min(num_block, kMaxGridNum); switch (pool_type) { case PoolType::MAX: hipLaunchKernelGGL(( adaptive_pool_kernel<T, idleOp<T>, maxOp<T>, idleOp<T>>) , dim3(num_block), dim3(CUDA_NUM_THREADS), share_size, stream, output, input, ts_input_size, input_stride, ts_output_size, output_stride, nb_dims, nb_reduce_dims, cell_per_block, thread_per_cell, idleOp<T>(), maxOp<T>(), idleOp<T>(), num_cell); break; case PoolType::AVERAGE: hipLaunchKernelGGL(( adaptive_pool_kernel<T, idleOp<T>, sumOp<T>, divCountOp<T>>) , dim3(num_block), dim3(CUDA_NUM_THREADS), share_size, stream, output, input, ts_input_size, input_stride, ts_output_size, output_stride, nb_dims, nb_reduce_dims, cell_per_block, thread_per_cell, idleOp<T>(), sumOp<T>(), divCountOp<T>(), num_cell); break; default: break; } } template void adaptive_pool<float>(float *output, const float *input, int *input_dims, int *output_dims, int nb_dims, int nb_reduce_dims, PoolType pool_type, hipStream_t stream); } // namespace plugin } // namespace amirstan
8f1e684a962e4d88cdc488ef9d9d30b9bf275c5c.cu
#include <cuda_fp16.h> #include <stdio.h> #include <algorithm> #include <cmath> #include "adaptive_pool.h" #include "amir_cuda_util/cuda_util.h" namespace amirstan { namespace plugin { #define START_IND(a, b, c) (int)std::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)std::ceil((float)((a + 1) * c) / b) using namespace amirstan::cuda; using amirstan::cuda::TensorSize; using amirstan::cuda::TensorStride; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> struct idleOp { inline __device__ T operator()(const T &x, size_t reduce_count = 0) { return x; } }; template <typename T> struct maxOp { inline __device__ T operator()(const T &x, const T &y) { return x > y ? x : y; } }; template <typename T> struct sumOp { inline __device__ T operator()(const T &x, const T &y) { return x + y; } }; template <typename T> struct divCountOp { inline __device__ T operator()(const T &x, size_t reduce_count = 0) { return x / reduce_count; } }; template <typename T, typename PreReduceOp, typename ReduceOp, typename PostReduceOp> __global__ void adaptive_pool_kernel( T *output, const T *input, TensorSize input_size, TensorStride input_stride, TensorSize output_size, TensorStride output_stride, int nb_dims, int nb_reduce_dims, int cell_per_block, int thread_per_cell, PreReduceOp pre_reduce_op, ReduceOp reduce_op, PostReduceOp post_reduce_op, size_t N) { const int REDUCE_MAX_COUNT = 10; float reduce_size[REDUCE_MAX_COUNT]; float reduce_stride[REDUCE_MAX_COUNT]; extern __shared__ T smemChar[]; for (int cell_id = blockIdx.x * cell_per_block + threadIdx.x / thread_per_cell; cell_id < (N); cell_id += cell_per_block * gridDim.x) { // exit if thread belong to no cell if (threadIdx.x >= cell_per_block * thread_per_cell) { break; } // how many element in cell and the start input offset of cell size_t cell_count = 1; size_t cell_input_offset = 0; int tmp_cell_id = cell_id; for (int i = 0; i < nb_dims; ++i) { const int idx = tmp_cell_id / output_stride.size[i]; tmp_cell_id = tmp_cell_id % output_stride.size[i]; const int input_idx_start = START_IND(idx, output_size.size[i], input_size.size[i]); const int input_idx_end = END_IND(idx, output_size.size[i], input_size.size[i]); cell_count *= (input_idx_end - input_idx_start); reduce_size[i] = (input_idx_end - input_idx_start); cell_input_offset += input_idx_start * input_stride.size[i]; } cell_count = max(cell_count, size_t(1)); reduce_stride[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { reduce_stride[i] = reduce_stride[i + 1] * reduce_size[i + 1]; } // put data in share memory const int cell_thread_id = threadIdx.x % thread_per_cell; for (int cell_data_id = cell_thread_id; cell_data_id < cell_count; cell_data_id += thread_per_cell) { int tmp_cell_data_id = cell_data_id; size_t input_idx = 0; for (int i = nb_dims - nb_reduce_dims; i < nb_dims; ++i) { const int idx = tmp_cell_data_id / reduce_stride[i]; tmp_cell_data_id = tmp_cell_data_id % int(reduce_stride[i]); input_idx += idx * input_stride.size[i]; } if (cell_data_id == cell_thread_id) { smemChar[threadIdx.x] = pre_reduce_op(input[cell_input_offset + input_idx], cell_count); } else { smemChar[threadIdx.x] = reduce_op( smemChar[threadIdx.x], pre_reduce_op(input[cell_input_offset + input_idx], cell_count)); } } __syncthreads(); ///// reduce smemChar for (unsigned int reduce_step = thread_per_cell / 2; reduce_step > 0; reduce_step >>= 1) { if (cell_thread_id < reduce_step && cell_thread_id < cell_count) { smemChar[threadIdx.x] = 
reduce_op(smemChar[threadIdx.x], smemChar[threadIdx.x + reduce_step]); } __syncthreads(); } if (cell_thread_id == 0) { // for(int i=1;i<min(size_t(thread_per_cell),size_t(cell_count));++i){ // smemChar[threadIdx.x]=reduce_op(smemChar[threadIdx.x], // smemChar[threadIdx.x+i]); // } output[cell_id] = post_reduce_op(smemChar[threadIdx.x], cell_count); } } } template <typename T> void adaptive_pool(T *output, const T *input, int *input_dims, int *output_dims, int nb_dims, int nb_reduce_dims, PoolType pool_type, cudaStream_t stream) { TensorSize ts_input_size; TensorStride input_stride; memcpy(&(ts_input_size.size[0]), input_dims, sizeof(int) * nb_dims); input_stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { input_stride.size[i] = input_stride.size[i + 1] * ts_input_size.size[i + 1]; } TensorSize ts_output_size; TensorStride output_stride; memcpy(&ts_output_size.size[0], output_dims, sizeof(int) * nb_dims); output_stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { output_stride.size[i] = output_stride.size[i + 1] * ts_output_size.size[i + 1]; } size_t reduce_cell_count = 1; for (int i = nb_dims - nb_reduce_dims; i < nb_dims; ++i) { size_t reduce_size = ceilDiv<size_t>(ts_input_size.size[i], ts_output_size.size[i]); reduce_cell_count *= max(size_t(1), reduce_size); } size_t num_cell = output_stride.size[0] * ts_output_size.size[0]; int thread_per_cell = 1; while ((thread_per_cell << 1) < reduce_cell_count) { thread_per_cell = thread_per_cell << 1; } thread_per_cell = std::min(CUDA_NUM_THREADS, thread_per_cell); int cell_per_block = std::max(1, CUDA_NUM_THREADS / thread_per_cell); int share_size = CUDA_NUM_THREADS * sizeof(T); int num_block = ceilDiv<int>(num_cell, cell_per_block); num_block = min(num_block, kMaxGridNum); switch (pool_type) { case PoolType::MAX: adaptive_pool_kernel<T, idleOp<T>, maxOp<T>, idleOp<T>> <<<num_block, CUDA_NUM_THREADS, share_size, stream>>>( output, input, ts_input_size, input_stride, ts_output_size, output_stride, nb_dims, nb_reduce_dims, cell_per_block, thread_per_cell, idleOp<T>(), maxOp<T>(), idleOp<T>(), num_cell); break; case PoolType::AVERAGE: adaptive_pool_kernel<T, idleOp<T>, sumOp<T>, divCountOp<T>> <<<num_block, CUDA_NUM_THREADS, share_size, stream>>>( output, input, ts_input_size, input_stride, ts_output_size, output_stride, nb_dims, nb_reduce_dims, cell_per_block, thread_per_cell, idleOp<T>(), sumOp<T>(), divCountOp<T>(), num_cell); break; default: break; } } template void adaptive_pool<float>(float *output, const float *input, int *input_dims, int *output_dims, int nb_dims, int nb_reduce_dims, PoolType pool_type, cudaStream_t stream); } // namespace plugin } // namespace amirstan
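Editorial note: a hedged host-side usage sketch for the adaptive_pool<float> entry point defined in the pair of files above. The tensor shapes, pointer names, and the assumption that PoolType is declared in amirstan::plugin (as adaptive_pool.h appears to do) are illustrative, not taken from the project.

// Illustrative only: adaptively average-pool a 1x8x32x32 NCHW tensor to 1x8x7x7.
// d_in and d_out are assumed to be device pointers of the appropriate sizes.
void runAdaptiveAvgPool(const float* d_in, float* d_out, cudaStream_t stream) {
    int input_dims[4]  = {1, 8, 32, 32};
    int output_dims[4] = {1, 8, 7, 7};
    amirstan::plugin::adaptive_pool<float>(d_out, d_in, input_dims, output_dims,
                                           4,  // nb_dims
                                           2,  // nb_reduce_dims: pool over H and W only
                                           amirstan::plugin::PoolType::AVERAGE, stream);
}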
67a98d4fa533d97f52fe98ff3c108033e656ab2a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SingleHashGraph.cuh" int main(int argc, char **argv) { int64_t countSize = 1L << 24; int64_t maxkey = 1L << 26; int64_t tableSize = maxkey; int64_t lrbBins = 16000; if (argc >= 3 && argc < 4) { std::cerr << "Please specify all arguments.\n"; return 1; } if (argc >= 3) { countSize = atoi(argv[1]); // countSize = 1L << sizeExp; maxkey = atoi(argv[2]); // maxkey = 1L << keyExp; lrbBins = atoi(argv[3]); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float buildTime = 0.0f; // milliseoncds // rmm_mgpu_context_t context; // SingleHashGraph shg(countSize, maxkey, context, tableSize); SingleHashGraph shg(countSize, maxkey, tableSize, lrbBins); hipEventRecord(start); // shg.build(countSize, context, tableSize); shg.build(countSize, tableSize); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&buildTime, start, stop); std::cout << "single buildTable() time: " << (buildTime / 1000.0) << "\n"; // seconds }
67a98d4fa533d97f52fe98ff3c108033e656ab2a.cu
/* * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SingleHashGraph.cuh" int main(int argc, char **argv) { int64_t countSize = 1L << 24; int64_t maxkey = 1L << 26; int64_t tableSize = maxkey; int64_t lrbBins = 16000; if (argc >= 3 && argc < 4) { std::cerr << "Please specify all arguments.\n"; return 1; } if (argc >= 3) { countSize = atoi(argv[1]); // countSize = 1L << sizeExp; maxkey = atoi(argv[2]); // maxkey = 1L << keyExp; lrbBins = atoi(argv[3]); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float buildTime = 0.0f; // milliseoncds // rmm_mgpu_context_t context; // SingleHashGraph shg(countSize, maxkey, context, tableSize); SingleHashGraph shg(countSize, maxkey, tableSize, lrbBins); cudaEventRecord(start); // shg.build(countSize, context, tableSize); shg.build(countSize, tableSize); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&buildTime, start, stop); std::cout << "single buildTable() time: " << (buildTime / 1000.0) << "\n"; // seconds }
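Editorial note: both versions of main() above time the build with the same event pattern; the sketch below factors that pattern into a reusable helper and adds the event cleanup the originals leave out. The helper name is illustrative, not part of the project.

// Illustrative helper wrapping the cudaEvent timing pattern used in main() above;
// `work` stands in for any enqueued GPU work, e.g. shg.build(countSize, tableSize).
#include <cuda_runtime.h>

template <typename F>
float timeGpuWorkMs(F&& work) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    work();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait for the timed work to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);             // the originals never destroy their events
    cudaEventDestroy(stop);
    return ms;
}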
a13c55e214cc6787c3634db8dfefab33bbd9c7f7.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
a13c55e214cc6787c3634db8dfefab33bbd9c7f7.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
fde730c466643b798881af3067fb74d110a7fd7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fft_helper.h" #include <hipfft.h> namespace SCRIMP { __global__ void elementwise_multiply_inplace(const hipDoubleComplex* A, hipDoubleComplex* B, const int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < size) { B[tid] = cuCmul(A[tid], B[tid]); } } // A is input unaligned sliding dot products produced by ifft // out is the computed vector of distances __global__ void normalized_aligned_dot_products(const double* A, const double divisor, const unsigned int m, const unsigned int n, double* QT) { int a = blockIdx.x * blockDim.x + threadIdx.x; if (a < n) { QT[a] = A[a + m - 1] / divisor; } } __global__ void populate_reverse_pad(const double *Q, double *Q_reverse_pad, const double *mean, const int window_size, const int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; double mu = *mean; if(tid < window_size) { Q_reverse_pad[tid] = Q[window_size - 1 - tid] - mu; }else if(tid < size){ Q_reverse_pad[tid] = 0; } } SCRIMPError_t fft_precompute_helper::compute_QT(double* QT, const double* T, const double *Q, const double *qmeans, hipStream_t s) { hipfftResult cufftError; hipError_t error; const int n = size - window_size + 1; dim3 block(fft_work_size, 1, 1); cufftError = hipfftSetStream(fft_plan, s); if (cufftError != HIPFFT_SUCCESS) { return SCRIMP_CUFFT_ERROR; } cufftError = hipfftSetStream(ifft_plan,s); if (cufftError != HIPFFT_SUCCESS) { return SCRIMP_CUFFT_ERROR; } // Compute the FFT of the time series cufftError = hipfftExecD2Z(fft_plan, const_cast<double*>(T), Tc); if (cufftError != HIPFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } // Reverse and zero pad the query hipLaunchKernelGGL(( populate_reverse_pad), dim3(dim3(ceil(size / (float) fft_work_size),1,1)), dim3(block), 0, s, Q, Q_reverse_pad, qmeans, window_size, size); error = hipPeekAtLastError(); if (error != hipSuccess) { return SCRIMP_CUDA_ERROR; } cufftError = hipfftExecD2Z(fft_plan, Q_reverse_pad, Qc); if (cufftError != HIPFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } hipLaunchKernelGGL(( elementwise_multiply_inplace), dim3(dim3(ceil(cufft_data_size / (float) fft_work_size), 1, 1)), dim3(block), 0, s, Tc, Qc, cufft_data_size); error = hipPeekAtLastError(); if ( error != hipSuccess) { return SCRIMP_CUDA_ERROR; } cufftError = hipfftExecZ2D(ifft_plan, Qc, Q_reverse_pad); if (cufftError != HIPFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } hipLaunchKernelGGL(( normalized_aligned_dot_products), dim3(dim3(ceil(n / (float) fft_work_size), 1, 1)), dim3(block), 0, s, Q_reverse_pad, size, window_size, n, QT); error = hipPeekAtLastError(); if(error != hipSuccess) { return SCRIMP_CUDA_ERROR; } return SCRIMP_NO_ERROR; } }
fde730c466643b798881af3067fb74d110a7fd7b.cu
#include "fft_helper.h" #include <cufft.h> namespace SCRIMP { __global__ void elementwise_multiply_inplace(const cuDoubleComplex* A, cuDoubleComplex* B, const int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < size) { B[tid] = cuCmul(A[tid], B[tid]); } } // A is input unaligned sliding dot products produced by ifft // out is the computed vector of distances __global__ void normalized_aligned_dot_products(const double* A, const double divisor, const unsigned int m, const unsigned int n, double* QT) { int a = blockIdx.x * blockDim.x + threadIdx.x; if (a < n) { QT[a] = A[a + m - 1] / divisor; } } __global__ void populate_reverse_pad(const double *Q, double *Q_reverse_pad, const double *mean, const int window_size, const int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; double mu = *mean; if(tid < window_size) { Q_reverse_pad[tid] = Q[window_size - 1 - tid] - mu; }else if(tid < size){ Q_reverse_pad[tid] = 0; } } SCRIMPError_t fft_precompute_helper::compute_QT(double* QT, const double* T, const double *Q, const double *qmeans, cudaStream_t s) { cufftResult cufftError; cudaError_t error; const int n = size - window_size + 1; dim3 block(fft_work_size, 1, 1); cufftError = cufftSetStream(fft_plan, s); if (cufftError != CUFFT_SUCCESS) { return SCRIMP_CUFFT_ERROR; } cufftError = cufftSetStream(ifft_plan,s); if (cufftError != CUFFT_SUCCESS) { return SCRIMP_CUFFT_ERROR; } // Compute the FFT of the time series cufftError = cufftExecD2Z(fft_plan, const_cast<double*>(T), Tc); if (cufftError != CUFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } // Reverse and zero pad the query populate_reverse_pad<<<dim3(ceil(size / (float) fft_work_size),1,1), block, 0, s>>>(Q, Q_reverse_pad, qmeans, window_size, size); error = cudaPeekAtLastError(); if (error != cudaSuccess) { return SCRIMP_CUDA_ERROR; } cufftError = cufftExecD2Z(fft_plan, Q_reverse_pad, Qc); if (cufftError != CUFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } elementwise_multiply_inplace<<<dim3(ceil(cufft_data_size / (float) fft_work_size), 1, 1), block, 0, s>>>(Tc, Qc, cufft_data_size); error = cudaPeekAtLastError(); if ( error != cudaSuccess) { return SCRIMP_CUDA_ERROR; } cufftError = cufftExecZ2D(ifft_plan, Qc, Q_reverse_pad); if (cufftError != CUFFT_SUCCESS) { return SCRIMP_CUFFT_EXEC_ERROR; } normalized_aligned_dot_products<<<dim3(ceil(n / (float) fft_work_size), 1, 1), block, 0, s>>>(Q_reverse_pad, size, window_size, n, QT); error = cudaPeekAtLastError(); if(error != cudaSuccess) { return SCRIMP_CUDA_ERROR; } return SCRIMP_NO_ERROR; } }
caffd28d36594d76f6396a6a13a852bbabae684f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_vector.h> #include <iostream> #include <generics/ldg.h> //Just for example purposes - a non plain old data type template<typename T> struct non_pod { T x; T y; T z; __host__ __device__ non_pod() {} __host__ __device__ non_pod(T a, T b, T c) : x(a), y(b), z(c) {} friend std::ostream& operator<<(std::ostream& os, const non_pod& f) { os << (int)f.x << " " << (int)f.y << " " << (int)f.z; return os; } }; //Uses LDG to copy an element from one array into another template<typename T> __global__ void test_ldg(const T* i, T* o) { *o = __ldg(i); } int main() { //sizeof(non_pod<char>) is 3 //will use 8-bit LDG loads typedef non_pod<char> non_pod3; thrust::device_vector<non_pod3> i3(1); thrust::device_vector<non_pod3> o3(1); //Initialize input i3[0] = non_pod3(1,2,3); //Use LDG to copy hipLaunchKernelGGL(( test_ldg), dim3(1),dim3(1), 0, 0, thrust::raw_pointer_cast(i3.data()), thrust::raw_pointer_cast(o3.data())); //Retrieve result non_pod3 r3 = o3[0]; std::cout << r3 << std::endl; //sizeof(non_pod<int>) is 12 //will use 32-bit LDG loads typedef non_pod<int> non_pod12; thrust::device_vector<non_pod12> i12(1); thrust::device_vector<non_pod12> o12(1); i12[0] = non_pod12(4,5,6); hipLaunchKernelGGL(( test_ldg), dim3(1),dim3(1), 0, 0, thrust::raw_pointer_cast(i12.data()), thrust::raw_pointer_cast(o12.data())); non_pod12 r12 = o12[0]; std::cout << r12 << std::endl; }
caffd28d36594d76f6396a6a13a852bbabae684f.cu
#include <thrust/device_vector.h> #include <iostream> #include <generics/ldg.h> //Just for example purposes - a non plain old data type template<typename T> struct non_pod { T x; T y; T z; __host__ __device__ non_pod() {} __host__ __device__ non_pod(T a, T b, T c) : x(a), y(b), z(c) {} friend std::ostream& operator<<(std::ostream& os, const non_pod& f) { os << (int)f.x << " " << (int)f.y << " " << (int)f.z; return os; } }; //Uses LDG to copy an element from one array into another template<typename T> __global__ void test_ldg(const T* i, T* o) { *o = __ldg(i); } int main() { //sizeof(non_pod<char>) is 3 //will use 8-bit LDG loads typedef non_pod<char> non_pod3; thrust::device_vector<non_pod3> i3(1); thrust::device_vector<non_pod3> o3(1); //Initialize input i3[0] = non_pod3(1,2,3); //Use LDG to copy test_ldg<<<1,1>>>(thrust::raw_pointer_cast(i3.data()), thrust::raw_pointer_cast(o3.data())); //Retrieve result non_pod3 r3 = o3[0]; std::cout << r3 << std::endl; //sizeof(non_pod<int>) is 12 //will use 32-bit LDG loads typedef non_pod<int> non_pod12; thrust::device_vector<non_pod12> i12(1); thrust::device_vector<non_pod12> o12(1); i12[0] = non_pod12(4,5,6); test_ldg<<<1,1>>>(thrust::raw_pointer_cast(i12.data()), thrust::raw_pointer_cast(o12.data())); non_pod12 r12 = o12[0]; std::cout << r12 << std::endl; }
ea89ca1b6b7e8b2b35473ff3a91b3b9a29a7befb.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/DistributionTemplates.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THH/THHGeneral.h> #include <THH/THHApply.cuh> #include <THH/THHDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> namespace at { namespace native { void geometric_kernel(TensorIterator& iter, double p_, c10::optional<Generator> gen) { auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator()); at::native::templates::cuda::geometric_kernel(iter, p_, generator); } REGISTER_DISPATCH(geometric_stub, &geometric_kernel); }} // namespace at::native
ea89ca1b6b7e8b2b35473ff3a91b3b9a29a7befb.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/DistributionTemplates.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THC/THCGeneral.h> #include <THC/THCApply.cuh> #include <THC/THCDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> namespace at { namespace native { void geometric_kernel(TensorIterator& iter, double p_, c10::optional<Generator> gen) { auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator()); at::native::templates::cuda::geometric_kernel(iter, p_, generator); } REGISTER_DISPATCH(geometric_stub, &geometric_kernel); }} // namespace at::native
2c7247a778cb4ed4e53579aa7a0ab05e265950f3.hip
// !!! This is a file automatically generated by hipify!!! #include <THHUNN/THHUNN.h> #include <THHUNN/generic/SpatialMaxPooling.hip> #include <THH/THHGenerateFloatTypes.h>
2c7247a778cb4ed4e53579aa7a0ab05e265950f3.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/generic/SpatialMaxPooling.cu> #include <THC/THCGenerateFloatTypes.h>
46341c242d60fdd9affff16a33895b549481cd60.hip
// !!! This is a file automatically generated by hipify!!! #include "jacketSDK.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "hip/device_functions.h" #include <iostream> #define TPB 96 __global__ void D2Q9_SNL_VNL_MRT_ts(float * fOut, float * fIn, const int * snl, const int * vnl, const float * ux_p, const float * uy_p, float * omega_op, int Nx, int Ny){ int tid = threadIdx.x+blockIdx.x*blockDim.x; int nnodes = Nx*Ny; if(tid<nnodes){ float fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9; float fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9; float fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9; float w,cu; __shared__ float omega[9][9]; //get the density data for the lattice point. fi1=fIn[tid]; fi2=fIn[nnodes+tid]; fi3=fIn[2*nnodes+tid]; fi4=fIn[3*nnodes+tid]; fi5=fIn[4*nnodes+tid]; fi6=fIn[5*nnodes+tid]; fi7=fIn[6*nnodes+tid]; fi8=fIn[7*nnodes+tid]; fi9=fIn[8*nnodes+tid]; //load portion of omega into shared memory if(threadIdx.x<81){ int col=threadIdx.x/9; int row = threadIdx.x-col*9; omega[row][col]=*(omega_op+9*col+row); } //compute rho float rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9; //compute velocity float ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8)); float uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9)); if(vnl[tid]==1){ //moving boundary float dx=ux_p[tid]-ux; float dy = uy_p[tid]-uy; //speed 2 w=1./9.; cu = 3.*(dx); fi2+=w*rho*cu; //speed 3 cu=3.*dy; fi3+=w*rho*cu; //speed 4 cu=3.*(-dx); fi4+=w*rho*cu; //speed 5 cu = 3.*(-dy); fi5+=w*rho*cu; //speed 6 w=1./36.; cu=3.*(dx+dy); fi6+=w*rho*cu; //speed 7 cu=3.*(-dx+dy); fi7+=w*rho*cu; //speed 8 cu=3.*(-dx-dy); fi8+=w*rho*cu; //speed 9 cu=3.*(dx-dy); fi9+=w*rho*cu; //also, update macroscopic velocity... ux +=dx; uy+=dy; } if(snl[tid]==1){ ux=0.; uy=0.; } //everybody compute fEq //speed 1 w = 4./9.; cu = 0.; fe1 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 2 w = 1./9.; cu = 3.0*ux; fe2 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 3 cu = 3.0*uy; fe3 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 4 cu = -3.0*ux; fe4 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 5 cu = -3.0*uy; fe5 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 6 w = 1./36.; cu = 3.0*(ux+uy); fe6 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 7 cu = 3.0*(-ux+uy); fe7 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 8 cu = 3.0*(-ux-uy); fe8 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 9 cu= 3.0*(ux-uy); fe9 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //really...I need fe to equal the non-equilibrium part... fe1=fi1-fe1; fe2=fi2-fe2; fe3=fi3-fe3; fe4=fi4-fe4; fe5=fi5-fe5; fe6=fi6-fe6; fe7=fi7-fe7; fe8=fi8-fe8; fe9=fi9-fe9; //MRT relaxation __syncthreads();//make sure omega is loaded... 
fo1=fi1-(fe1*omega[0][0]+fe2*omega[1][0]+fe3*omega[2][0]+fe4*omega[3][0]+fe5*omega[4][0]+fe6*omega[5][0]+fe7*omega[6][0]+fe8*omega[7][0]+fe9*omega[8][0]); fo2=fi2-(fe1*omega[0][1]+fe2*omega[1][1]+fe3*omega[2][1]+fe4*omega[3][1]+fe5*omega[4][1]+fe6*omega[5][1]+fe7*omega[6][1]+fe8*omega[7][1]+fe9*omega[8][1]); fo3=fi3-(fe1*omega[0][2]+fe2*omega[1][2]+fe3*omega[2][2]+fe4*omega[3][2]+fe5*omega[4][2]+fe6*omega[5][2]+fe7*omega[6][2]+fe8*omega[7][2]+fe9*omega[8][2]); fo4=fi4-(fe1*omega[0][3]+fe2*omega[1][3]+fe3*omega[2][3]+fe4*omega[3][3]+fe5*omega[4][3]+fe6*omega[5][3]+fe7*omega[6][3]+fe8*omega[7][3]+fe9*omega[8][3]); fo5=fi5-(fe1*omega[0][4]+fe2*omega[1][4]+fe3*omega[2][4]+fe4*omega[3][4]+fe5*omega[4][4]+fe6*omega[5][4]+fe7*omega[6][4]+fe8*omega[7][4]+fe9*omega[8][4]); fo6=fi6-(fe1*omega[0][5]+fe2*omega[1][5]+fe3*omega[2][5]+fe4*omega[3][5]+fe5*omega[4][5]+fe6*omega[5][5]+fe7*omega[6][5]+fe8*omega[7][5]+fe9*omega[8][5]); fo7=fi7-(fe1*omega[0][6]+fe2*omega[1][6]+fe3*omega[2][6]+fe4*omega[3][6]+fe5*omega[4][6]+fe6*omega[5][6]+fe7*omega[6][6]+fe8*omega[7][6]+fe9*omega[8][6]); fo8=fi8-(fe1*omega[0][7]+fe2*omega[1][7]+fe3*omega[2][7]+fe4*omega[3][7]+fe5*omega[4][7]+fe6*omega[5][7]+fe7*omega[6][7]+fe8*omega[7][7]+fe9*omega[8][7]); fo9=fi9-(fe1*omega[0][8]+fe2*omega[1][8]+fe3*omega[2][8]+fe4*omega[3][8]+fe5*omega[4][8]+fe6*omega[5][8]+fe7*omega[6][8]+fe8*omega[7][8]+fe9*omega[8][8]); //bounce-back nodes do this instead... if(snl[tid]==1){ fo1=fi1; fo2=fi4; fo4=fi2; fo3=fi5; fo5=fi3; fo6=fi8; fo8=fi6; fo7=fi9; fo9=fi7; ux = 0.; uy = 0.; }//if(solid_node_list[tid]==1)... // stream the result... //compute the local stream vector... int x; int y; int yn; int ys; int xe; int xw; //int dir; int dof_num; //int f_num; x = tid%Nx+1; y = ((tid+1)-x+1)/Nx + 1; yn = y%Ny+1; xe = x%Nx+1; if(y==1){ ys = Ny; }else{ ys = y-1; } if(x==1){ xw=Nx; }else{ xw=x-1; } dof_num = Nx*(y-1)+x; fOut[dof_num-1]=fo1; dof_num=Nx*(y-1)+xe; fOut[nnodes+dof_num-1]=fo2; dof_num=Nx*(yn-1)+x; fOut[2*nnodes+dof_num-1]=fo3; dof_num=Nx*(y-1)+xw; fOut[3*nnodes+dof_num-1]=fo4; dof_num=Nx*(ys-1)+x; fOut[4*nnodes+dof_num-1]=fo5; dof_num=Nx*(yn-1)+xe; fOut[5*nnodes+dof_num-1]=fo6; dof_num=Nx*(yn-1)+xw; fOut[6*nnodes+dof_num-1]=fo7; dof_num=Nx*(ys-1)+xw; fOut[7*nnodes+dof_num-1]=fo8; dof_num=Nx*(ys-1)+xe; fOut[8*nnodes+dof_num-1]=fo9; } } err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){ if(nrhs!=9) return err("Usage: D2Q9_SNL_VNL_MRT_ts(fOut,fIn,snl,vnl,ux_p,uy_p,omega_op,Nx,Ny"); mxArray * m_fOut = prhs[0]; mxArray * m_fIn = prhs[1]; mxArray * m_snl = prhs[2]; mxArray * m_vnl = prhs[3]; mxArray * m_ux_p = prhs[4]; mxArray * m_uy_p = prhs[5]; mxArray * m_omega_op = prhs[6]; int Nx = mxGetScalar(prhs[7]); int Ny = mxGetScalar(prhs[8]); int nnodes = Nx*Ny; float * fOut; float * fIn; int * snl; int * vnl; float * ux_p; float * uy_p; float * omega_op; jkt_mem((void**)&fOut,m_fOut); jkt_mem((void**)&fIn,m_fIn); jkt_mem((void**)&snl,m_snl); jkt_mem((void**)&vnl,m_vnl); jkt_mem((void**)&ux_p,m_ux_p); jkt_mem((void**)&uy_p,m_uy_p); jkt_mem((void**)&omega_op,m_omega_op); dim3 BLOCKS(TPB,1,1); dim3 GRIDS((nnodes+TPB-1)/TPB,1,1); hipLaunchKernelGGL(( D2Q9_SNL_VNL_MRT_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fOut,fIn,snl,vnl,ux_p,uy_p,omega_op,Nx,Ny); return errNone; }
46341c242d60fdd9affff16a33895b549481cd60.cu
#include "jacketSDK.h" #include <cuda.h> #include <cuda_runtime.h> #include "device_functions.h" #include <iostream> #define TPB 96 __global__ void D2Q9_SNL_VNL_MRT_ts(float * fOut, float * fIn, const int * snl, const int * vnl, const float * ux_p, const float * uy_p, float * omega_op, int Nx, int Ny){ int tid = threadIdx.x+blockIdx.x*blockDim.x; int nnodes = Nx*Ny; if(tid<nnodes){ float fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9; float fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9; float fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9; float w,cu; __shared__ float omega[9][9]; //get the density data for the lattice point. fi1=fIn[tid]; fi2=fIn[nnodes+tid]; fi3=fIn[2*nnodes+tid]; fi4=fIn[3*nnodes+tid]; fi5=fIn[4*nnodes+tid]; fi6=fIn[5*nnodes+tid]; fi7=fIn[6*nnodes+tid]; fi8=fIn[7*nnodes+tid]; fi9=fIn[8*nnodes+tid]; //load portion of omega into shared memory if(threadIdx.x<81){ int col=threadIdx.x/9; int row = threadIdx.x-col*9; omega[row][col]=*(omega_op+9*col+row); } //compute rho float rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9; //compute velocity float ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8)); float uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9)); if(vnl[tid]==1){ //moving boundary float dx=ux_p[tid]-ux; float dy = uy_p[tid]-uy; //speed 2 w=1./9.; cu = 3.*(dx); fi2+=w*rho*cu; //speed 3 cu=3.*dy; fi3+=w*rho*cu; //speed 4 cu=3.*(-dx); fi4+=w*rho*cu; //speed 5 cu = 3.*(-dy); fi5+=w*rho*cu; //speed 6 w=1./36.; cu=3.*(dx+dy); fi6+=w*rho*cu; //speed 7 cu=3.*(-dx+dy); fi7+=w*rho*cu; //speed 8 cu=3.*(-dx-dy); fi8+=w*rho*cu; //speed 9 cu=3.*(dx-dy); fi9+=w*rho*cu; //also, update macroscopic velocity... ux +=dx; uy+=dy; } if(snl[tid]==1){ ux=0.; uy=0.; } //everybody compute fEq //speed 1 w = 4./9.; cu = 0.; fe1 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 2 w = 1./9.; cu = 3.0*ux; fe2 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 3 cu = 3.0*uy; fe3 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 4 cu = -3.0*ux; fe4 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 5 cu = -3.0*uy; fe5 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 6 w = 1./36.; cu = 3.0*(ux+uy); fe6 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 7 cu = 3.0*(-ux+uy); fe7 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 8 cu = 3.0*(-ux-uy); fe8 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //speed 9 cu= 3.0*(ux-uy); fe9 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy)); //really...I need fe to equal the non-equilibrium part... fe1=fi1-fe1; fe2=fi2-fe2; fe3=fi3-fe3; fe4=fi4-fe4; fe5=fi5-fe5; fe6=fi6-fe6; fe7=fi7-fe7; fe8=fi8-fe8; fe9=fi9-fe9; //MRT relaxation __syncthreads();//make sure omega is loaded... 
fo1=fi1-(fe1*omega[0][0]+fe2*omega[1][0]+fe3*omega[2][0]+fe4*omega[3][0]+fe5*omega[4][0]+fe6*omega[5][0]+fe7*omega[6][0]+fe8*omega[7][0]+fe9*omega[8][0]); fo2=fi2-(fe1*omega[0][1]+fe2*omega[1][1]+fe3*omega[2][1]+fe4*omega[3][1]+fe5*omega[4][1]+fe6*omega[5][1]+fe7*omega[6][1]+fe8*omega[7][1]+fe9*omega[8][1]); fo3=fi3-(fe1*omega[0][2]+fe2*omega[1][2]+fe3*omega[2][2]+fe4*omega[3][2]+fe5*omega[4][2]+fe6*omega[5][2]+fe7*omega[6][2]+fe8*omega[7][2]+fe9*omega[8][2]); fo4=fi4-(fe1*omega[0][3]+fe2*omega[1][3]+fe3*omega[2][3]+fe4*omega[3][3]+fe5*omega[4][3]+fe6*omega[5][3]+fe7*omega[6][3]+fe8*omega[7][3]+fe9*omega[8][3]); fo5=fi5-(fe1*omega[0][4]+fe2*omega[1][4]+fe3*omega[2][4]+fe4*omega[3][4]+fe5*omega[4][4]+fe6*omega[5][4]+fe7*omega[6][4]+fe8*omega[7][4]+fe9*omega[8][4]); fo6=fi6-(fe1*omega[0][5]+fe2*omega[1][5]+fe3*omega[2][5]+fe4*omega[3][5]+fe5*omega[4][5]+fe6*omega[5][5]+fe7*omega[6][5]+fe8*omega[7][5]+fe9*omega[8][5]); fo7=fi7-(fe1*omega[0][6]+fe2*omega[1][6]+fe3*omega[2][6]+fe4*omega[3][6]+fe5*omega[4][6]+fe6*omega[5][6]+fe7*omega[6][6]+fe8*omega[7][6]+fe9*omega[8][6]); fo8=fi8-(fe1*omega[0][7]+fe2*omega[1][7]+fe3*omega[2][7]+fe4*omega[3][7]+fe5*omega[4][7]+fe6*omega[5][7]+fe7*omega[6][7]+fe8*omega[7][7]+fe9*omega[8][7]); fo9=fi9-(fe1*omega[0][8]+fe2*omega[1][8]+fe3*omega[2][8]+fe4*omega[3][8]+fe5*omega[4][8]+fe6*omega[5][8]+fe7*omega[6][8]+fe8*omega[7][8]+fe9*omega[8][8]); //bounce-back nodes do this instead... if(snl[tid]==1){ fo1=fi1; fo2=fi4; fo4=fi2; fo3=fi5; fo5=fi3; fo6=fi8; fo8=fi6; fo7=fi9; fo9=fi7; ux = 0.; uy = 0.; }//if(solid_node_list[tid]==1)... // stream the result... //compute the local stream vector... int x; int y; int yn; int ys; int xe; int xw; //int dir; int dof_num; //int f_num; x = tid%Nx+1; y = ((tid+1)-x+1)/Nx + 1; yn = y%Ny+1; xe = x%Nx+1; if(y==1){ ys = Ny; }else{ ys = y-1; } if(x==1){ xw=Nx; }else{ xw=x-1; } dof_num = Nx*(y-1)+x; fOut[dof_num-1]=fo1; dof_num=Nx*(y-1)+xe; fOut[nnodes+dof_num-1]=fo2; dof_num=Nx*(yn-1)+x; fOut[2*nnodes+dof_num-1]=fo3; dof_num=Nx*(y-1)+xw; fOut[3*nnodes+dof_num-1]=fo4; dof_num=Nx*(ys-1)+x; fOut[4*nnodes+dof_num-1]=fo5; dof_num=Nx*(yn-1)+xe; fOut[5*nnodes+dof_num-1]=fo6; dof_num=Nx*(yn-1)+xw; fOut[6*nnodes+dof_num-1]=fo7; dof_num=Nx*(ys-1)+xw; fOut[7*nnodes+dof_num-1]=fo8; dof_num=Nx*(ys-1)+xe; fOut[8*nnodes+dof_num-1]=fo9; } } err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){ if(nrhs!=9) return err("Usage: D2Q9_SNL_VNL_MRT_ts(fOut,fIn,snl,vnl,ux_p,uy_p,omega_op,Nx,Ny"); mxArray * m_fOut = prhs[0]; mxArray * m_fIn = prhs[1]; mxArray * m_snl = prhs[2]; mxArray * m_vnl = prhs[3]; mxArray * m_ux_p = prhs[4]; mxArray * m_uy_p = prhs[5]; mxArray * m_omega_op = prhs[6]; int Nx = mxGetScalar(prhs[7]); int Ny = mxGetScalar(prhs[8]); int nnodes = Nx*Ny; float * fOut; float * fIn; int * snl; int * vnl; float * ux_p; float * uy_p; float * omega_op; jkt_mem((void**)&fOut,m_fOut); jkt_mem((void**)&fIn,m_fIn); jkt_mem((void**)&snl,m_snl); jkt_mem((void**)&vnl,m_vnl); jkt_mem((void**)&ux_p,m_ux_p); jkt_mem((void**)&uy_p,m_uy_p); jkt_mem((void**)&omega_op,m_omega_op); dim3 BLOCKS(TPB,1,1); dim3 GRIDS((nnodes+TPB-1)/TPB,1,1); D2Q9_SNL_VNL_MRT_ts<<<GRIDS,BLOCKS>>>(fOut,fIn,snl,vnl,ux_p,uy_p,omega_op,Nx,Ny); return errNone; }
6cc71dc38831ed2252266ddb339c765eba43272e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "support.h" __global__ void kernelP(int m, int n, int k, const float *A, const float *B, float* C) { const unsigned int BLOCK_SIZE = 32; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * BLOCK_SIZE + ty; int Col = bx * BLOCK_SIZE + tx; if (Row < m && Col < n) for(unsigned int i = 0; i < k; ++i) C[Row*n + Col] += A[Row*k + i]*B[i*n + Col]; __syncthreads(); } void midP(char transa, char transb, \ int m, int n, int k, \ float alpha, \ const float *A, int lda, \ const float *B, int ldb, \ float beta, \ float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } const unsigned int BLOCK_SIZE = 32; unsigned int grid_y = (unsigned int) ceil((double)m / (double)BLOCK_SIZE); unsigned int grid_x = (unsigned int) ceil((double)n / (double)BLOCK_SIZE); dim3 gridDim(grid_x, grid_y); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( kernelP), dim3(gridDim), dim3(blockDim), 0, 0, m, n, k, A, B, C); } int main (int argc, char *argv[]) { Timer timer; printf("\nRunning Non-Tiled..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned matArow, matAcol; unsigned matBrow, matBcol; dim3 dim_grid, dim_block; if (argc == 1) { matArow = 1000; matAcol = matBrow = 1000; matBcol = 1000; } else if (argc == 2) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[1]); matBcol = atoi(argv[1]); } else if (argc == 4) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[2]); matBcol = atoi(argv[3]); } else { printf("\n Invalid input parameters!" "\n Usage: ./sgemm # All matrices are 1000 x 1000" "\n Usage: ./sgemm <m> # All matrices are m x m" "\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n" "\n"); exit(0); } A_sz = matArow*matAcol; B_sz = matBrow*matBcol; C_sz = matArow*matBcol; A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) A_h[i] = (rand()%100)/100.00; B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) B_h[i] = (rand()%100)/100.00; C_h = (float*) malloc( sizeof(float)*C_sz ); printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol); hipMalloc((void **) &A_d, sizeof(float)*A_sz); hipMalloc((void **) &B_d, sizeof(float)*B_sz); hipMalloc((void **) &C_d, sizeof(float)*C_sz); hipDeviceSynchronize(); hipMemcpy(A_d, A_h, sizeof(float)*A_sz, hipMemcpyHostToDevice); hipMemcpy(B_d, B_h, sizeof(float)*B_sz, hipMemcpyHostToDevice); hipDeviceSynchronize(); midP('N', 'N', matArow, matBcol, matBrow, 1.0f, A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow); hipDeviceSynchronize(); hipMemcpy(C_h, C_d, sizeof(float)*C_sz, hipMemcpyDeviceToHost); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); free(A_h); free(B_h); free(C_h); hipFree(A_d); hipFree(B_d); hipFree(C_d); return 0; }
6cc71dc38831ed2252266ddb339c765eba43272e.cu
#include <stdio.h> #include <stdlib.h> #include "support.h" __global__ void kernelP(int m, int n, int k, const float *A, const float *B, float* C) { const unsigned int BLOCK_SIZE = 32; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * BLOCK_SIZE + ty; int Col = bx * BLOCK_SIZE + tx; if (Row < m && Col < n) for(unsigned int i = 0; i < k; ++i) C[Row*n + Col] += A[Row*k + i]*B[i*n + Col]; __syncthreads(); } void midP(char transa, char transb, \ int m, int n, int k, \ float alpha, \ const float *A, int lda, \ const float *B, int ldb, \ float beta, \ float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } const unsigned int BLOCK_SIZE = 32; unsigned int grid_y = (unsigned int) ceil((double)m / (double)BLOCK_SIZE); unsigned int grid_x = (unsigned int) ceil((double)n / (double)BLOCK_SIZE); dim3 gridDim(grid_x, grid_y); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); kernelP<<<gridDim, blockDim>>>(m, n, k, A, B, C); } int main (int argc, char *argv[]) { Timer timer; printf("\nRunning Non-Tiled..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned matArow, matAcol; unsigned matBrow, matBcol; dim3 dim_grid, dim_block; if (argc == 1) { matArow = 1000; matAcol = matBrow = 1000; matBcol = 1000; } else if (argc == 2) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[1]); matBcol = atoi(argv[1]); } else if (argc == 4) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[2]); matBcol = atoi(argv[3]); } else { printf("\n Invalid input parameters!" "\n Usage: ./sgemm # All matrices are 1000 x 1000" "\n Usage: ./sgemm <m> # All matrices are m x m" "\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n" "\n"); exit(0); } A_sz = matArow*matAcol; B_sz = matBrow*matBcol; C_sz = matArow*matBcol; A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) A_h[i] = (rand()%100)/100.00; B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) B_h[i] = (rand()%100)/100.00; C_h = (float*) malloc( sizeof(float)*C_sz ); printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol); cudaMalloc((void **) &A_d, sizeof(float)*A_sz); cudaMalloc((void **) &B_d, sizeof(float)*B_sz); cudaMalloc((void **) &C_d, sizeof(float)*C_sz); cudaDeviceSynchronize(); cudaMemcpy(A_d, A_h, sizeof(float)*A_sz, cudaMemcpyHostToDevice); cudaMemcpy(B_d, B_h, sizeof(float)*B_sz, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); midP('N', 'N', matArow, matBcol, matBrow, 1.0f, A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow); cudaDeviceSynchronize(); cudaMemcpy(C_h, C_d, sizeof(float)*C_sz, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); free(A_h); free(B_h); free(C_h); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); return 0; }
2b10e48fa54c1ef4a6a2d83893a6321b2894c71a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> #define PerThread 1024*4*8//i #define N 64*256*1024*4//PI #define BlockNum 32 //block #define ThreadNum 64 //blockthreads __global__ void Gpu_calPI(double* Gpu_list) { __shared__ double cache[ThreadNum];//blockshared memory. int cacheIdx=threadIdx.x; int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x; int begin=tid*PerThread; int end=begin+PerThread-1; double temp=0; for(int i=begin;i<end;i++){ temp+=4.0/(1+((i+0.5)/(N))*((i+0.5)/(N))); } cache[cacheIdx]=temp; __syncthreads();// int i=blockDim.x/2; while(i!=0){ if(cacheIdx<i) cache[cacheIdx]+=cache[cacheIdx+i]; __syncthreads();// i=i/2; } if(cacheIdx==0){ Gpu_list[blockIdx.x]=cache[0]; } } int main(void) { double * cpu_list; double * Gpu_list; double outcome=0; cpu_list=(double*)malloc(sizeof(double)*BlockNum); hipMalloc((void**)&Gpu_list,sizeof(double)*BlockNum); // dim3 blocksize=dim3(1,ThreadNum); // dim3 gridsize=dim3(1,BlockNum); double begin = clock(); hipLaunchKernelGGL(( Gpu_calPI), dim3(BlockNum),dim3(ThreadNum), 0, 0, Gpu_list); hipMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum,hipMemcpyDeviceToHost); for(int i=0;i<BlockNum;i++){ outcome+=cpu_list[i]; } outcome=outcome/(N); double end=clock(); printf("Scu1: N=%d, outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC)); // printf("block x=%d,y=%d\n",blocksize.x,blocksize.y); // printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y); }
2b10e48fa54c1ef4a6a2d83893a6321b2894c71a.cu
#include <stdio.h> #include <time.h> #define PerThread 1024*4*8//number of terms i computed by each thread #define N 64*256*1024*4//total number of terms summed in the PI integral #define BlockNum 32 //number of blocks #define ThreadNum 64 //number of threads per block __global__ void Gpu_calPI(double* Gpu_list) { __shared__ double cache[ThreadNum];//each block shares one shared memory array. int cacheIdx=threadIdx.x; int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x; int begin=tid*PerThread; int end=begin+PerThread-1; double temp=0; for(int i=begin;i<end;i++){ temp+=4.0/(1+((i+0.5)/(N))*((i+0.5)/(N))); } cache[cacheIdx]=temp; __syncthreads();//synchronize int i=blockDim.x/2; while(i!=0){ if(cacheIdx<i) cache[cacheIdx]+=cache[cacheIdx+i]; __syncthreads();//synchronize i=i/2; } if(cacheIdx==0){ Gpu_list[blockIdx.x]=cache[0]; } } int main(void) { double * cpu_list; double * Gpu_list; double outcome=0; cpu_list=(double*)malloc(sizeof(double)*BlockNum); cudaMalloc((void**)&Gpu_list,sizeof(double)*BlockNum); // dim3 blocksize=dim3(1,ThreadNum); // dim3 gridsize=dim3(1,BlockNum); double begin = clock(); Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list); cudaMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum,cudaMemcpyDeviceToHost); for(int i=0;i<BlockNum;i++){ outcome+=cpu_list[i]; } outcome=outcome/(N); double end=clock(); printf("Scu1: N=%d, outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC)); // printf("block x=%d,y=%d\n",blocksize.x,blocksize.y); // printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y); }
bf04a3a825d8902a76127a947188e3c9cbc21911.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /// Copyright (C) 2016 Giuseppe Bilotta <[email protected]> /// License: GPLv3 #define restrict __restrict__ typedef unsigned int uint; typedef unsigned int hashKey; typedef ushort4 particleinfo; __global__ void initParticles( particleinfo * restrict infoArray, hashKey * restrict hashArray, uint * restrict idxArray, uint numParticles) { uint idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx > numParticles) return; idxArray[idx] = idx; particleinfo info; info.x = idx % 4; info.y = 0; info.z = (ushort)(idx & 0xffff); info.w = (ushort)(idx >> 16); infoArray[idx] = info; hashArray[idx] = idx/17 + (idx % (idx & 17)); }
bf04a3a825d8902a76127a947188e3c9cbc21911.cu
#include "includes.h" /// Copyright (C) 2016 Giuseppe Bilotta <[email protected]> /// License: GPLv3 #define restrict __restrict__ typedef unsigned int uint; typedef unsigned int hashKey; typedef ushort4 particleinfo; __global__ void initParticles( particleinfo * restrict infoArray, hashKey * restrict hashArray, uint * restrict idxArray, uint numParticles) { uint idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx > numParticles) return; idxArray[idx] = idx; particleinfo info; info.x = idx % 4; info.y = 0; info.z = (ushort)(idx & 0xffff); info.w = (ushort)(idx >> 16); infoArray[idx] = info; hashArray[idx] = idx/17 + (idx % (idx & 17)); }
f8093b44673f7a4bc84e59b0e6772996df527e7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; //this is where the thread blocks start in the thread space int block_row = blockIdx.y; int block_col = blockIdx.x; //this is where they go into memory int footprint_row = block_row*2; int footprint_col = block_col*2; //4 different C values float Cvalue0 = 0; float Cvalue1 = 0; float Cvalue2 = 0; float Cvalue3 = 0; Csub = &C.elements[C.stride * footprint_row * FOOTPRINT_SIZE + FOOTPRINT_SIZE * footprint_col]; for (int m = 0; m < (A.width / FOOTPRINT_SIZE); ++m){ Asub = &A.elements[A.stride * FOOTPRINT_SIZE * footprint_row + FOOTPRINT_SIZE * m]; Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * footprint_col]; __shared__ float shared_A[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; __shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; shared_A[thread_row][thread_col] = Asub[A.stride * thread_row + thread_col]; shared_B[thread_row][thread_col] = Bsub[B.stride * thread_row + thread_col]; shared_A[thread_row][thread_col+BLOCK_SIZE] = Asub[A.stride * thread_row + thread_col+BLOCK_SIZE]; shared_B[thread_row][thread_col+BLOCK_SIZE] = Bsub[B.stride * thread_row + thread_col+BLOCK_SIZE]; shared_A[thread_row+BLOCK_SIZE][thread_col] = Asub[A.stride * (thread_row + BLOCK_SIZE) + thread_col]; shared_B[thread_row+BLOCK_SIZE][thread_col] = Bsub[B.stride * (thread_row + BLOCK_SIZE) + thread_col]; shared_A[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Asub[A.stride * (thread_row + BLOCK_SIZE) + thread_col+BLOCK_SIZE]; shared_B[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Bsub[B.stride * (thread_row + BLOCK_SIZE) + thread_col+BLOCK_SIZE]; //make sure all threads __syncthreads(); #pragma unroll for( int e = 0; e<FOOTPRINT_SIZE; e++){ //top left and top right Cvalue0 += shared_A[thread_row][e]*shared_B[e][e]; Cvalue1 += shared_A[thread_row][e]*shared_B[e][thread_col+BLOCK_SIZE]; Cvalue2 += shared_A[thread_row+BLOCK_SIZE][e]*shared_B[e][thread_col]; Cvalue3 += shared_A[thread_row+BLOCK_SIZE][e]*shared_B[e][thread_col+BLOCK_SIZE]; } // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col ] = Cvalue0; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE] = Cvalue1; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE*C.stride] = Cvalue2; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE*C.stride + BLOCK_SIZE] = Cvalue3; }
f8093b44673f7a4bc84e59b0e6772996df527e7c.cu
/// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; //this is where the thread blocks start in the thread space int block_row = blockIdx.y; int block_col = blockIdx.x; //this is where they go into memory int footprint_row = block_row*2; int footprint_col = block_col*2; //4 different C values float Cvalue0 = 0; float Cvalue1 = 0; float Cvalue2 = 0; float Cvalue3 = 0; Csub = &C.elements[C.stride * footprint_row * FOOTPRINT_SIZE + FOOTPRINT_SIZE * footprint_col]; for (int m = 0; m < (A.width / FOOTPRINT_SIZE); ++m){ Asub = &A.elements[A.stride * FOOTPRINT_SIZE * footprint_row + FOOTPRINT_SIZE * m]; Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * footprint_col]; __shared__ float shared_A[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; __shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; shared_A[thread_row][thread_col] = Asub[A.stride * thread_row + thread_col]; shared_B[thread_row][thread_col] = Bsub[B.stride * thread_row + thread_col]; shared_A[thread_row][thread_col+BLOCK_SIZE] = Asub[A.stride * thread_row + thread_col+BLOCK_SIZE]; shared_B[thread_row][thread_col+BLOCK_SIZE] = Bsub[B.stride * thread_row + thread_col+BLOCK_SIZE]; shared_A[thread_row+BLOCK_SIZE][thread_col] = Asub[A.stride * (thread_row + BLOCK_SIZE) + thread_col]; shared_B[thread_row+BLOCK_SIZE][thread_col] = Bsub[B.stride * (thread_row + BLOCK_SIZE) + thread_col]; shared_A[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Asub[A.stride * (thread_row + BLOCK_SIZE) + thread_col+BLOCK_SIZE]; shared_B[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Bsub[B.stride * (thread_row + BLOCK_SIZE) + thread_col+BLOCK_SIZE]; //make sure all threads __syncthreads(); #pragma unroll for( int e = 0; e<FOOTPRINT_SIZE; e++){ //top left and top right Cvalue0 += shared_A[thread_row][e]*shared_B[e][e]; Cvalue1 += shared_A[thread_row][e]*shared_B[e][thread_col+BLOCK_SIZE]; Cvalue2 += shared_A[thread_row+BLOCK_SIZE][e]*shared_B[e][thread_col]; Cvalue3 += shared_A[thread_row+BLOCK_SIZE][e]*shared_B[e][thread_col+BLOCK_SIZE]; } // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col ] = Cvalue0; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE] = Cvalue1; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE*C.stride] = Cvalue2; Csub[thread_row * C.stride + thread_col + BLOCK_SIZE*C.stride + BLOCK_SIZE] = Cvalue3; }
abbb6143185da4bd332667e11895ec047587b4eb.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Alexander Ocsa <[email protected]> * Copyright 2018 Felipe Aramburu <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include <iostream> #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <tuple> #include "helper/utils.cuh" #include "gdf_test_fixtures.h" /* ============================================================================ Description : Compute gpu_comparison and apply_stencil of gdf_columns using Thrust on GPU ============================================================================ */ struct FilterOperationsTest : public GdfTest {}; TEST_F(FilterOperationsTest, usage_example) { using LeftValueType = int16_t; using RightValueType = int16_t; int column_size = 10; int init_value = 10; int max_size = 4; gdf_comparison_operator gdf_operator = GDF_EQUALS; gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); std::cout << "Left" << std::endl; print_column<LeftValueType>(&lhs); std::cout << "Right" << std::endl; print_column<RightValueType>(&rhs); std::cout << "Output" << std::endl; print_column<int8_t>(&output); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); /// lhs.dtype === rhs.dtype gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } template <typename LeftValueType, typename RightValueType> void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS) { //0, ..., 100, //100, 10000, 10000, 100000 for (int column_size = 0; column_size < 10; column_size += 1) { const int max_size = 8; for (int init_value = 0; init_value <= 1; init_value++) { gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 // lhs.null_count = 2; gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 // rhs.null_count = 1; gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); if (lhs.dtype == rhs.dtype ) { gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); } delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } } } 
TEST_F(FilterOperationsTest, WithInt8AndOthers) { test_filterops_using_templates<int8_t, int8_t>(); test_filterops_using_templates<int8_t, int16_t>(); test_filterops_using_templates<int8_t, int32_t>(); test_filterops_using_templates<int8_t, int64_t>(); test_filterops_using_templates<int8_t, float>(); test_filterops_using_templates<int8_t, double>(); } TEST_F(FilterOperationsTest, WithInt16AndOthers) { test_filterops_using_templates<int16_t, int8_t>(); test_filterops_using_templates<int16_t, int16_t>(); test_filterops_using_templates<int16_t, int32_t>(); test_filterops_using_templates<int16_t, int64_t>(); test_filterops_using_templates<int16_t, float>(); test_filterops_using_templates<int16_t, double>(); } TEST_F(FilterOperationsTest, WithInt32AndOthers) { test_filterops_using_templates<int32_t, int8_t>(); test_filterops_using_templates<int32_t, int16_t>(); test_filterops_using_templates<int32_t, int32_t>(); test_filterops_using_templates<int32_t, int64_t>(); test_filterops_using_templates<int32_t, float>(); test_filterops_using_templates<int32_t, double>(); } TEST_F(FilterOperationsTest, WithInt64AndOthers) { test_filterops_using_templates<int64_t, int8_t>(); test_filterops_using_templates<int64_t, int16_t>(); test_filterops_using_templates<int64_t, int32_t>(); test_filterops_using_templates<int64_t, int64_t>(); test_filterops_using_templates<int64_t, float>(); test_filterops_using_templates<int64_t, double>(); } TEST_F(FilterOperationsTest, WithFloat32AndOthers) { test_filterops_using_templates<float, int8_t>(); test_filterops_using_templates<float, int16_t>(); test_filterops_using_templates<float, int32_t>(); test_filterops_using_templates<float, int64_t>(); test_filterops_using_templates<float, float>(); test_filterops_using_templates<float, double>(); } TEST_F(FilterOperationsTest, WithFloat64AndOthers) { test_filterops_using_templates<double, int8_t>(); test_filterops_using_templates<double, int16_t>(); test_filterops_using_templates<double, int32_t>(); test_filterops_using_templates<double, int64_t>(); test_filterops_using_templates<double, float>(); test_filterops_using_templates<double, double>(); }
abbb6143185da4bd332667e11895ec047587b4eb.cu
/* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Alexander Ocsa <[email protected]> * Copyright 2018 Felipe Aramburu <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include <iostream> #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> #include <tuple> #include "helper/utils.cuh" #include "gdf_test_fixtures.h" /* ============================================================================ Description : Compute gpu_comparison and apply_stencil of gdf_columns using Thrust on GPU ============================================================================ */ struct FilterOperationsTest : public GdfTest {}; TEST_F(FilterOperationsTest, usage_example) { using LeftValueType = int16_t; using RightValueType = int16_t; int column_size = 10; int init_value = 10; int max_size = 4; gdf_comparison_operator gdf_operator = GDF_EQUALS; gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); std::cout << "Left" << std::endl; print_column<LeftValueType>(&lhs); std::cout << "Right" << std::endl; print_column<RightValueType>(&rhs); std::cout << "Output" << std::endl; print_column<int8_t>(&output); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); /// lhs.dtype === rhs.dtype gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } template <typename LeftValueType, typename RightValueType> void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS) { //0, ..., 100, //100, 10000, 10000, 100000 for (int column_size = 0; column_size < 10; column_size += 1) { const int max_size = 8; for (int init_value = 0; init_value <= 1; init_value++) { gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 // lhs.null_count = 2; gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 // rhs.null_count = 1; gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); if (lhs.dtype == rhs.dtype ) { gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); } delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } } } TEST_F(FilterOperationsTest, WithInt8AndOthers) { 
test_filterops_using_templates<int8_t, int8_t>(); test_filterops_using_templates<int8_t, int16_t>(); test_filterops_using_templates<int8_t, int32_t>(); test_filterops_using_templates<int8_t, int64_t>(); test_filterops_using_templates<int8_t, float>(); test_filterops_using_templates<int8_t, double>(); } TEST_F(FilterOperationsTest, WithInt16AndOthers) { test_filterops_using_templates<int16_t, int8_t>(); test_filterops_using_templates<int16_t, int16_t>(); test_filterops_using_templates<int16_t, int32_t>(); test_filterops_using_templates<int16_t, int64_t>(); test_filterops_using_templates<int16_t, float>(); test_filterops_using_templates<int16_t, double>(); } TEST_F(FilterOperationsTest, WithInt32AndOthers) { test_filterops_using_templates<int32_t, int8_t>(); test_filterops_using_templates<int32_t, int16_t>(); test_filterops_using_templates<int32_t, int32_t>(); test_filterops_using_templates<int32_t, int64_t>(); test_filterops_using_templates<int32_t, float>(); test_filterops_using_templates<int32_t, double>(); } TEST_F(FilterOperationsTest, WithInt64AndOthers) { test_filterops_using_templates<int64_t, int8_t>(); test_filterops_using_templates<int64_t, int16_t>(); test_filterops_using_templates<int64_t, int32_t>(); test_filterops_using_templates<int64_t, int64_t>(); test_filterops_using_templates<int64_t, float>(); test_filterops_using_templates<int64_t, double>(); } TEST_F(FilterOperationsTest, WithFloat32AndOthers) { test_filterops_using_templates<float, int8_t>(); test_filterops_using_templates<float, int16_t>(); test_filterops_using_templates<float, int32_t>(); test_filterops_using_templates<float, int64_t>(); test_filterops_using_templates<float, float>(); test_filterops_using_templates<float, double>(); } TEST_F(FilterOperationsTest, WithFloat64AndOthers) { test_filterops_using_templates<double, int8_t>(); test_filterops_using_templates<double, int16_t>(); test_filterops_using_templates<double, int32_t>(); test_filterops_using_templates<double, int64_t>(); test_filterops_using_templates<double, float>(); test_filterops_using_templates<double, double>(); }
56c18ed16b4d9445fc80e3ecee67caf3b2f06586.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "histogram_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *magnitude = NULL; hipMalloc(&magnitude, XSIZE*YSIZE); float *phase = NULL; hipMalloc(&phase, XSIZE*YSIZE); float *histograms = NULL; hipMalloc(&histograms, XSIZE*YSIZE); int input_width = XSIZE; int input_height = YSIZE; int cell_grid_width = XSIZE; int cell_grid_height = YSIZE; int magnitude_step = 1; int phase_step = 1; int histograms_step = 1; int cell_width = XSIZE; int cell_height = YSIZE; int num_bins = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
56c18ed16b4d9445fc80e3ecee67caf3b2f06586.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "histogram_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *magnitude = NULL; cudaMalloc(&magnitude, XSIZE*YSIZE); float *phase = NULL; cudaMalloc(&phase, XSIZE*YSIZE); float *histograms = NULL; cudaMalloc(&histograms, XSIZE*YSIZE); int input_width = XSIZE; int input_height = YSIZE; int cell_grid_width = XSIZE; int cell_grid_height = YSIZE; int magnitude_step = 1; int phase_step = 1; int histograms_step = 1; int cell_width = XSIZE; int cell_height = YSIZE; int num_bins = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); histogram_kernel<<<gridBlock,threadBlock>>>(magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { histogram_kernel<<<gridBlock,threadBlock>>>(magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { histogram_kernel<<<gridBlock,threadBlock>>>(magnitude,phase,histograms,input_width,input_height,cell_grid_width,cell_grid_height,magnitude_step,phase_step,histograms_step,cell_width,cell_height,num_bins); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b1446eef97231b20efdfd5b9fe7f9ab093203310.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; }
b1446eef97231b20efdfd5b9fe7f9ab093203310.cu
#include "includes.h" __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; }
20237e8632bf19ea1fe4937398287e3f631912e6.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" struct softPlusupdateOutput_functor { const float threshold; const float beta; softPlusupdateOutput_functor(float threshold_, float beta_) : threshold(threshold_) , beta(beta_) {} __device__ void operator()(float *output, const float *input) const { float betain = beta * (*input); *output = ((betain) > threshold) ? *input : (1/beta) * log1p(exp(betain)); } }; void THNN_CudaSoftPlus_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, float beta, float threshold) { THCUNN_assertSameGPU(state, 2, input, output); THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, softPlusupdateOutput_functor(threshold, beta)); } struct softPlusupdateGradInput_functor { const float threshold; const float beta; softPlusupdateGradInput_functor(float threshold_, float beta_) : threshold(threshold_) , beta(beta_) {} __device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const { float betaout = beta * (*output); float exp_bo = exp(betaout); *gradInput = ((betaout) > threshold) ? *gradOutput : *gradOutput * (exp_bo - 1) / exp_bo; } }; void THNN_CudaSoftPlus_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output, float beta, float threshold) { THCUNN_assertSameGPU(state, 4, input, output, gradOutput, gradInput); THCudaTensor_resizeAs(state, gradInput, output); THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, softPlusupdateGradInput_functor(threshold, beta)); }
20237e8632bf19ea1fe4937398287e3f631912e6.cu
#include "THCUNN.h" #include "common.h" struct softPlusupdateOutput_functor { const float threshold; const float beta; softPlusupdateOutput_functor(float threshold_, float beta_) : threshold(threshold_) , beta(beta_) {} __device__ void operator()(float *output, const float *input) const { float betain = beta * (*input); *output = ((betain) > threshold) ? *input : (1/beta) * log1p(exp(betain)); } }; void THNN_CudaSoftPlus_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, float beta, float threshold) { THCUNN_assertSameGPU(state, 2, input, output); THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, softPlusupdateOutput_functor(threshold, beta)); } struct softPlusupdateGradInput_functor { const float threshold; const float beta; softPlusupdateGradInput_functor(float threshold_, float beta_) : threshold(threshold_) , beta(beta_) {} __device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const { float betaout = beta * (*output); float exp_bo = exp(betaout); *gradInput = ((betaout) > threshold) ? *gradOutput : *gradOutput * (exp_bo - 1) / exp_bo; } }; void THNN_CudaSoftPlus_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output, float beta, float threshold) { THCUNN_assertSameGPU(state, 4, input, output, gradOutput, gradInput); THCudaTensor_resizeAs(state, gradInput, output); THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, softPlusupdateGradInput_functor(threshold, beta)); }
83873bbc9b7a8adc966e4a1efda887bfca5052eb.hip
// !!! This is a file automatically generated by hipify!!! /* Sushil Dubey, Shashi Dugad, TIFR, July 2017 * * File Name: RawToClusterGPU.cu * Description: It converts Raw data into Digi Format on GPU * Finaly the Output of RawToDigi data is given to pixelClusterizer **/ // C++ includes #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <fstream> #include <iomanip> #include <iostream> // CUDA includes #include <hip/hip_runtime.h> // CMSSW includes #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h" #include "DataFormats/FEDRawData/interface/FEDNumbering.h" #include "DataFormats/TrackerCommon/interface/TrackerTopology.h" #include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h" // local includes #include "SiPixelRawToClusterGPUKernel.h" // #define GPU_DEBUG namespace pixelgpudetails { __device__ bool isBarrel(uint32_t rawId) { return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask)); } __device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap, uint8_t fed, uint32_t link, uint32_t roc) { uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc; pixelgpudetails::DetIdGPU detId = { cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]}; return detId; } //reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html //http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071 // Convert local pixel to pixelgpudetails::global pixel __device__ pixelgpudetails::Pixel frameConversion( bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) { int slopeRow = 0, slopeCol = 0; int rowOffset = 0, colOffset = 0; if (bpix) { if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } // if roc } else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1 if (rocIdInDetUnit < 8) { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc; } else { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } } } else { // fpix if (side == -1) { // pannel 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } } else { // pannel 2 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; 
rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } } // side } uint32_t gRow = rowOffset + slopeRow * local.row; uint32_t gCol = colOffset + slopeCol * local.col; // inside frameConversion row: gRow, column: gCol pixelgpudetails::Pixel global = {gRow, gCol}; return global; } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint8_t conversionError(uint8_t fedId, uint8_t status) { uint8_t errorType = 0; switch (status) { case (1): { if constexpr (debug) printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId); errorType = 35; break; } case (2): { if constexpr (debug) printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId); errorType = 36; break; } case (3): { if constexpr (debug) printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId); errorType = 37; break; } case (4): { if constexpr (debug) printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId); errorType = 38; break; } default: if constexpr (debug) printf("Cabling check returned unexpected result, status = %i\n", status); }; return errorType; } __device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) { /// row and column in ROC representation return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc)); } __device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint8_t checkROC(uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelROCsStatusAndMapping *cablingMap) { uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask; if (errorType < 25) return 0; bool errorFound = false; switch (errorType) { case (25): { errorFound = true; uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1; if (index > 1 && index <= cablingMap->size) { if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index])) errorFound = false; } if constexpr (debug) if (errorFound) printf("Invalid ROC = 25 found (errorType = 25)\n"); break; } case (26): { if constexpr (debug) printf("Gap word found (errorType = 26)\n"); errorFound = true; break; } case (27): { if constexpr (debug) printf("Dummy word found (errorType = 27)\n"); errorFound = true; break; } case (28): { if constexpr (debug) printf("Error fifo nearly full (errorType = 28)\n"); errorFound = true; break; } case (29): { if constexpr (debug) printf("Timeout on a channel (errorType = 29)\n"); if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) { if constexpr (debug) printf("...first errorType=29 error, this gets masked out\n"); } errorFound = true; break; } case (30): { if constexpr (debug) printf("TBM error trailer (errorType = 30)\n"); int stateMatch_bits = 4; int stateMatch_shift = 8; uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits); int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask; if (stateMatch != 1 && stateMatch != 8) { if constexpr (debug) printf("FED error 30 with unexpected State Bits (errorType = 30)\n"); } if (stateMatch == 1) errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30 
errorFound = true; break; } case (31): { if constexpr (debug) printf("Event number error (errorType = 31)\n"); errorFound = true; break; } default: errorFound = false; }; return errorFound ? errorType : 0; } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint32_t getErrRawID(uint8_t fedId, uint32_t errWord, uint32_t errorType, const SiPixelROCsStatusAndMapping *cablingMap) { uint32_t rID = 0xffffffff; switch (errorType) { case 25: case 30: case 31: case 36: case 40: { uint32_t roc = 1; uint32_t link = sipixelconstants::getLink(errWord); uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } case 29: { int chanNmbr = 0; const int DB0_shift = 0; const int DB1_shift = DB0_shift + 1; const int DB2_shift = DB1_shift + 1; const int DB3_shift = DB2_shift + 1; const int DB4_shift = DB3_shift + 1; const uint32_t DataBit_mask = ~(~uint32_t(0) << 1); int CH1 = (errWord >> DB0_shift) & DataBit_mask; int CH2 = (errWord >> DB1_shift) & DataBit_mask; int CH3 = (errWord >> DB2_shift) & DataBit_mask; int CH4 = (errWord >> DB3_shift) & DataBit_mask; int CH5 = (errWord >> DB4_shift) & DataBit_mask; int BLOCK_bits = 3; int BLOCK_shift = 8; uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits); int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask; int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5; if (BLOCK % 2 == 0) chanNmbr = (BLOCK / 2) * 9 + localCH; else chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH; if ((chanNmbr < 1) || (chanNmbr > 36)) break; // signifies unexpected result uint32_t roc = 1; uint32_t link = chanNmbr; uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } case 37: case 38: { uint32_t roc = sipixelconstants::getROC(errWord); uint32_t link = sipixelconstants::getLink(errWord); uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } default: break; }; return rID; } // Kernel to perform Raw to Digi conversion template <bool debug = false> __global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap, const unsigned char *modToUnp, const uint32_t wordCounter, const uint32_t *word, const uint8_t *fedIds, SiPixelDigisCUDASOAView digisView, cms::cuda::SimpleVector<SiPixelErrorCompact> *err, bool useQualityInfo, bool includeErrors) { //if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end); int32_t first = threadIdx.x + blockIdx.x * blockDim.x; for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) { auto gIndex = iloop; auto dvgi = digisView[gIndex]; dvgi.xx() = 0; dvgi.yy() = 0; dvgi.adc() = 0; bool skipROC = false; uint8_t fedId = fedIds[gIndex / 2]; // +1200; // initialize (too many coninue below) dvgi.pdigi() = 0; dvgi.rawIdArr() = 0; dvgi.moduleId() = gpuClustering::invalidModuleId; uint32_t ww = word[gIndex]; // Array containing 32 bit raw data if (ww == 0) { // 0 is an indicator of a noise/dead channel, skip these pixels during clusterization continue; } uint32_t link = sipixelconstants::getLink(ww); // Extract link uint32_t roc = sipixelconstants::getROC(ww); // Extract ROC in link uint8_t errorType = checkROC<debug>(ww, fedId, link, cablingMap); skipROC = (roc < pixelgpudetails::maxROCIndex) ? 
false : (errorType != 0); if (includeErrors and skipROC) { uint32_t rID = getErrRawID<debug>(fedId, ww, errorType, cablingMap); err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId}); continue; } // check for spurious channels if (roc > MAX_ROC or link > MAX_LINK) { if constexpr (debug) { printf("spurious roc %d found on link %d, detector %d (index %d)\n", roc, link, getRawId(cablingMap, fedId, link, 1).rawId, gIndex); } continue; } uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc; if (useQualityInfo) { skipROC = cablingMap->badRocs[index]; if (skipROC) continue; } skipROC = modToUnp[index]; if (skipROC) continue; pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc); uint32_t rawId = detId.rawId; uint32_t layer = 0; int side = 0, panel = 0, module = 0; bool barrel = isBarrel(rawId); if (barrel) { layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask; module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask; side = (module < 5) ? -1 : 1; } else { // endcap ids layer = 0; panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask; side = (panel == 1) ? -1 : 1; } // ***special case of layer to 1 be handled here pixelgpudetails::Pixel localPix; if (layer == 1) { uint32_t col = sipixelconstants::getCol(ww); uint32_t row = sipixelconstants::getRow(ww); localPix.row = row; localPix.col = col; if (includeErrors) { if (not rocRowColIsValid(row, col)) { uint8_t error = conversionError<debug>(fedId, 3); //use the device function and fill the arrays err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId}); if constexpr (debug) printf("BPIX1 Error status: %i\n", error); continue; } } } else { // ***conversion rules for dcol and pxid uint32_t dcol = sipixelconstants::getDCol(ww); uint32_t pxid = sipixelconstants::getPxId(ww); uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2; uint32_t col = dcol * 2 + pxid % 2; localPix.row = row; localPix.col = col; if (includeErrors and not dcolIsValid(dcol, pxid)) { uint8_t error = conversionError<debug>(fedId, 3); err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId}); if constexpr (debug) printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc); continue; } } pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, detId.rocInDet, localPix); dvgi.xx() = globalPix.row; // origin shifting by 1 0-159 dvgi.yy() = globalPix.col; // origin shifting by 1 0-415 dvgi.adc() = sipixelconstants::getADC(ww); dvgi.pdigi() = pixelgpudetails::pack(globalPix.row, globalPix.col, dvgi.adc()); dvgi.moduleId() = detId.moduleId; dvgi.rawIdArr() = rawId; } // end of loop (gIndex < end) } // end of Raw to Digi kernel template <typename TrackerTraits> __global__ void fillHitsModuleStart(uint32_t const *__restrict__ clusInModule, uint32_t *__restrict__ moduleStart, uint32_t const *__restrict__ nModules, uint32_t *__restrict__ nModules_Clusters) { constexpr int nMaxModules = TrackerTraits::numberOfModules; constexpr int startBPIX2 = TrackerTraits::layerStart[1]; constexpr uint32_t maxHitsInModule = TrackerTraits::maxHitsInModule; assert(startBPIX2 < nMaxModules); assert(nMaxModules < 4096); // easy to extend at least till 32*1024 assert(nMaxModules > 1024); assert(1 == gridDim.x); assert(0 == blockIdx.x); int first = threadIdx.x; // limit to MaxHitsInModule; for (int i = first, iend = nMaxModules; i < iend; i += blockDim.x) { moduleStart[i + 1] = ::min(maxHitsInModule, clusInModule[i]); } constexpr bool isPhase2 = 
std::is_base_of<pixelTopology::Phase2, TrackerTraits>::value; __shared__ uint32_t ws[32]; cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws); constexpr int lastModules = isPhase2 ? 1024 : nMaxModules - 1024; cms::cuda::blockPrefixScan(moduleStart + 1024 + 1, moduleStart + 1024 + 1, lastModules, ws); if constexpr (isPhase2) { cms::cuda::blockPrefixScan(moduleStart + 2048 + 1, moduleStart + 2048 + 1, 1024, ws); cms::cuda::blockPrefixScan(moduleStart + 3072 + 1, moduleStart + 3072 + 1, nMaxModules - 3072, ws); } for (int i = first + 1025, iend = isPhase2 ? 2049 : nMaxModules + 1; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[1024]; } __syncthreads(); if constexpr (isPhase2) { for (int i = first + 2049, iend = 3073; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[2048]; } __syncthreads(); for (int i = first + 3073, iend = nMaxModules + 1; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[3072]; } __syncthreads(); } if (threadIdx.x == 0) { // copy the number of modules nModules_Clusters[0] = *nModules; // last element holds the number of all clusters nModules_Clusters[1] = moduleStart[nMaxModules]; // element 96 is the start of BPIX2 (i.e. the number of clusters in BPIX1) nModules_Clusters[2] = moduleStart[startBPIX2]; } #ifdef GPU_DEBUG uint16_t maxH = isPhase2 ? 3027 : 1024; assert(0 == moduleStart[0]); auto c0 = ::min(maxHitsInModule, clusInModule[0]); assert(c0 == moduleStart[1]); assert(moduleStart[maxH] >= moduleStart[maxH - 1]); assert(moduleStart[maxH + 1] >= moduleStart[maxH]); assert(moduleStart[nMaxModules] >= moduleStart[maxH + 1]); constexpr int startFP1 = TrackerTraits::numberOfModulesInBarrel; constexpr int startLastFwd = TrackerTraits::layerStart[TrackerTraits::numberOfLayers]; for (int i = first, iend = nMaxModules + 1; i < iend; i += blockDim.x) { if (0 != i) assert(moduleStart[i] >= moduleStart[i - i]); // [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID] // [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856] if (i == startBPIX2 || i == startFP1 || i == startLastFwd || i == nMaxModules) printf("moduleStart %d %d\n", i, moduleStart[i]); } #endif } // Interface to outside template <typename TrackerTraits> void SiPixelRawToClusterGPUKernel<TrackerTraits>::makePhase1ClustersAsync( bool isRun2, const SiPixelClusterThresholds clusterThresholds, const SiPixelROCsStatusAndMapping *cablingMap, const unsigned char *modToUnp, const SiPixelGainForHLTonGPU *gains, const WordFedAppender &wordFed, SiPixelFormatterErrors &&errors, const uint32_t wordCounter, const uint32_t fedCounter, bool useQualityInfo, bool includeErrors, bool debug, hipStream_t stream) { // we're not opting for calling this function in case of early events assert(wordCounter != 0); nDigis = wordCounter; #ifdef GPU_DEBUG std::cout << "decoding " << wordCounter << " digis." 
<< std::endl; #endif // since wordCounter != 0 we're not allocating 0 bytes, // digis_d = SiPixelDigisCUDA(wordCounter, stream); digis_d = SiPixelDigisCUDA(size_t(wordCounter), stream); if (includeErrors) { digiErrors_d = SiPixelDigiErrorsCUDA(wordCounter, std::move(errors), stream); } clusters_d = SiPixelClustersCUDA(TrackerTraits::numberOfModules, stream); // Begin Raw2Digi block { const int threadsPerBlock = 512; const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all assert(0 == wordCounter % 2); // wordCounter is the total no of words in each event to be trasfered on device auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream); auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream); cudaCheck( hipMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync( fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream)); // Launch rawToDigi kernel if (debug) hipLaunchKernelGGL(( RawToDigi_kernel<true>), dim3(blocks), dim3(threadsPerBlock), 0, stream, // cablingMap, modToUnp, wordCounter, word_d.get(), fedId_d.get(), digis_d.view(), digiErrors_d.error(), // returns nullptr if default-constructed useQualityInfo, includeErrors); else hipLaunchKernelGGL(( RawToDigi_kernel<false>), dim3(blocks), dim3(threadsPerBlock), 0, stream, // cablingMap, modToUnp, wordCounter, word_d.get(), fedId_d.get(), digis_d.view(), digiErrors_d.error(), // returns nullptr if default-constructed useQualityInfo, includeErrors); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); #endif if (includeErrors) { digiErrors_d.copyErrorToHostAsync(stream); } } // End of Raw2Digi and passing data for clustering { // clusterizer ... 
using namespace gpuClustering; int threadsPerBlock = 256; int blocks = (::max(int(wordCounter), int(TrackerTraits::numberOfModules)) + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( gpuCalibPixel::calibDigis), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds, digis_d.view().moduleId(), digis_d.view().xx(), digis_d.view().yy(), digis_d.view().adc(), gains, wordCounter, clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->clusModuleStart()); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); #endif #ifdef GPU_DEBUG std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif hipLaunchKernelGGL(( countModules<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d->moduleId(), clusters_d->moduleStart(), digis_d->clus(), wordCounter); cudaCheck(hipGetLastError()); threadsPerBlock = ((TrackerTraits::maxPixInModule / 16 + 128 - 1) / 128) * 128; /// should be larger than maxPixInModule/16 aka (maxPixInModule/maxiter in the kernel) blocks = TrackerTraits::numberOfModules; #ifdef GPU_DEBUG std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif hipLaunchKernelGGL(( findClus<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d->rawIdArr(), digis_d->moduleId(), digis_d->xx(), digis_d->yy(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), wordCounter); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); #endif // apply charge cut hipLaunchKernelGGL(( clusterChargeCut<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds, digis_d->moduleId(), digis_d->adc(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), wordCounter); cudaCheck(hipGetLastError()); // count the module start indices already here (instead of // rechits) so that the number of clusters/hits can be made // available in the rechit producer without additional points of // synchronization/ExternalWork auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream); // MUST be ONE block hipLaunchKernelGGL(( fillHitsModuleStart<TrackerTraits>), dim3(1), dim3(1024), 0, stream, clusters_d->clusInModule(), clusters_d->clusModuleStart(), clusters_d->moduleStart(), nModules_Clusters_d.get()); // copy to host nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream); cudaCheck(hipMemcpyAsync( nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), hipMemcpyDefault, stream)); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); #endif } // end clusterizer scope } template <typename TrackerTraits> void SiPixelRawToClusterGPUKernel<TrackerTraits>::makePhase2ClustersAsync( const SiPixelClusterThresholds clusterThresholds, const uint16_t *moduleIds, const uint16_t *xDigis, const uint16_t *yDigis, const uint16_t *adcDigis, const uint32_t *packedData, const uint32_t *rawIds, const uint32_t numDigis, hipStream_t stream) { using namespace gpuClustering; nDigis = numDigis; digis_d = SiPixelDigisCUDA(numDigis, stream); cudaCheck(hipMemcpyAsync(digis_d->moduleId(), moduleIds, sizeof(uint16_t) * numDigis, hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync(digis_d->xx(), xDigis, sizeof(uint16_t) * numDigis, hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync(digis_d->yy(), yDigis, sizeof(uint16_t) * numDigis, 
hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync(digis_d->adc(), adcDigis, sizeof(uint16_t) * numDigis, hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync(digis_d->pdigi(), packedData, sizeof(uint32_t) * numDigis, hipMemcpyDefault, stream)); cudaCheck(hipMemcpyAsync(digis_d->rawIdArr(), rawIds, sizeof(uint32_t) * numDigis, hipMemcpyDefault, stream)); clusters_d = SiPixelClustersCUDA(TrackerTraits::numberOfModules, stream); nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream); int threadsPerBlock = 512; int blocks = (int(numDigis) + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( gpuCalibPixel::calibDigisPhase2), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds, digis_d->moduleId(), digis_d->adc(), numDigis, clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->clusModuleStart()); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif hipLaunchKernelGGL(( countModules<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d->moduleId(), clusters_d->moduleStart(), digis_d->clus(), numDigis); cudaCheck(hipGetLastError()); // read the number of modules into a data member, used by getProduct()) cudaCheck(hipMemcpyAsync( &(nModules_Clusters_h[0]), clusters_d->moduleStart(), sizeof(uint32_t), hipMemcpyDefault, stream)); threadsPerBlock = 256; blocks = TrackerTraits::numberOfModules; #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif hipLaunchKernelGGL(( findClus<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d->rawIdArr(), digis_d->moduleId(), digis_d->xx(), digis_d->yy(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), numDigis); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); std::cout << "CUDA clusterChargeCut kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif // apply charge cut hipLaunchKernelGGL(( clusterChargeCut<TrackerTraits>), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds, digis_d->moduleId(), digis_d->adc(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), numDigis); cudaCheck(hipGetLastError()); auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream); // MUST be ONE block #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); std::cout << "CUDA fillHitsModuleStart kernel launch \n"; #endif hipLaunchKernelGGL(( fillHitsModuleStart<TrackerTraits>), dim3(1), dim3(1024), 0, stream, clusters_d->clusInModule(), clusters_d->clusModuleStart(), clusters_d->moduleStart(), nModules_Clusters_d.get()); nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream); cudaCheck(hipMemcpyAsync( nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), hipMemcpyDefault, stream)); #ifdef GPU_DEBUG cudaCheck(hipStreamSynchronize(stream)); #endif } // template class SiPixelRawToClusterGPUKernel<pixelTopology::Phase1>; template class SiPixelRawToClusterGPUKernel<pixelTopology::Phase2>; template class SiPixelRawToClusterGPUKernel<pixelTopology::HIonPhase1>; } // namespace pixelgpudetails
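// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original file): a
// minimal host-side restatement of two pieces of arithmetic used above — the
// flat cabling-map index computed in getRawId(), and the local-ROC-pixel to
// global-module-pixel mapping applied in frameConversion(). The numeric
// constants below are assumptions for illustration only (a pixel ROC is taken
// as 80 x 52, with up to 8 ROCs per link); they are not read from
// SiPixelROCsStatusAndMapping and may differ from the real detector constants.
// ---------------------------------------------------------------------------
#include <cstdint>

namespace sketch {
  // assumed sizes, for illustration only
  constexpr uint32_t kMaxLink = 48;    // links per FED (assumption)
  constexpr uint32_t kMaxRoc = 8;      // ROCs per link (assumption)
  constexpr uint32_t kRowsInRoc = 80;  // pixel rows in one ROC (assumption)
  constexpr uint32_t kColsInRoc = 52;  // pixel columns in one ROC (assumption)

  // Flat index into the cabling map, mirroring getRawId() above:
  // one entry per (fed, link, roc), with link and roc counted from 1.
  inline uint32_t cablingIndex(uint32_t fed, uint32_t link, uint32_t roc) {
    return fed * kMaxLink * kMaxRoc + (link - 1) * kMaxRoc + roc;
  }

  // Local -> global pixel, as in frameConversion(): each ROC contributes its
  // pixels with a +/-1 slope and a row/column offset that depend on which of
  // the (up to 16) ROCs of the module it is and on the module orientation.
  struct Pixel { uint32_t row, col; };
  inline Pixel toGlobal(int slopeRow, int slopeCol, int rowOffset, int colOffset, Pixel local) {
    return {uint32_t(rowOffset + slopeRow * int(local.row)),
            uint32_t(colOffset + slopeCol * int(local.col))};
  }
}  // namespace sketch

// Example (per the branch above for a +Z barrel module, rocIdInDetUnit = 3 < 8):
// slopeRow = -1, slopeCol = +1, rowOffset = 2*80 - 1 = 159, colOffset = 3*52 = 156,
// so the local pixel (0, 0) maps to the global pixel (159, 156).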
83873bbc9b7a8adc966e4a1efda887bfca5052eb.cu
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017 * * File Name: RawToClusterGPU.cu * Description: It converts Raw data into Digi Format on GPU * Finaly the Output of RawToDigi data is given to pixelClusterizer **/ // C++ includes #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <fstream> #include <iomanip> #include <iostream> // CUDA includes #include <cuda_runtime.h> // CMSSW includes #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h" #include "DataFormats/FEDRawData/interface/FEDNumbering.h" #include "DataFormats/TrackerCommon/interface/TrackerTopology.h" #include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h" #include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h" // local includes #include "SiPixelRawToClusterGPUKernel.h" // #define GPU_DEBUG namespace pixelgpudetails { __device__ bool isBarrel(uint32_t rawId) { return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask)); } __device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap, uint8_t fed, uint32_t link, uint32_t roc) { uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc; pixelgpudetails::DetIdGPU detId = { cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]}; return detId; } //reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html //http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071 // Convert local pixel to pixelgpudetails::global pixel __device__ pixelgpudetails::Pixel frameConversion( bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) { int slopeRow = 0, slopeCol = 0; int rowOffset = 0, colOffset = 0; if (bpix) { if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } // if roc } else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1 if (rocIdInDetUnit < 8) { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc; } else { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } } } else { // fpix if (side == -1) { // pannel 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } } else { // pannel 2 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8 - rocIdInDetUnit) * 
pixelgpudetails::numColsInRoc - 1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1; colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc; } } // side } uint32_t gRow = rowOffset + slopeRow * local.row; uint32_t gCol = colOffset + slopeCol * local.col; // inside frameConversion row: gRow, column: gCol pixelgpudetails::Pixel global = {gRow, gCol}; return global; } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint8_t conversionError(uint8_t fedId, uint8_t status) { uint8_t errorType = 0; switch (status) { case (1): { if constexpr (debug) printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId); errorType = 35; break; } case (2): { if constexpr (debug) printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId); errorType = 36; break; } case (3): { if constexpr (debug) printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId); errorType = 37; break; } case (4): { if constexpr (debug) printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId); errorType = 38; break; } default: if constexpr (debug) printf("Cabling check returned unexpected result, status = %i\n", status); }; return errorType; } __device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) { /// row and column in ROC representation return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc)); } __device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint8_t checkROC(uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelROCsStatusAndMapping *cablingMap) { uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask; if (errorType < 25) return 0; bool errorFound = false; switch (errorType) { case (25): { errorFound = true; uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1; if (index > 1 && index <= cablingMap->size) { if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index])) errorFound = false; } if constexpr (debug) if (errorFound) printf("Invalid ROC = 25 found (errorType = 25)\n"); break; } case (26): { if constexpr (debug) printf("Gap word found (errorType = 26)\n"); errorFound = true; break; } case (27): { if constexpr (debug) printf("Dummy word found (errorType = 27)\n"); errorFound = true; break; } case (28): { if constexpr (debug) printf("Error fifo nearly full (errorType = 28)\n"); errorFound = true; break; } case (29): { if constexpr (debug) printf("Timeout on a channel (errorType = 29)\n"); if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) { if constexpr (debug) printf("...first errorType=29 error, this gets masked out\n"); } errorFound = true; break; } case (30): { if constexpr (debug) printf("TBM error trailer (errorType = 30)\n"); int stateMatch_bits = 4; int stateMatch_shift = 8; uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits); int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask; if (stateMatch != 1 && stateMatch != 8) { if constexpr (debug) printf("FED error 30 with unexpected State Bits (errorType = 30)\n"); } if (stateMatch == 1) errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30 errorFound = true; break; } case (31): { if constexpr 
(debug) printf("Event number error (errorType = 31)\n"); errorFound = true; break; } default: errorFound = false; }; return errorFound ? errorType : 0; } // error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc template <bool debug = false> __device__ uint32_t getErrRawID(uint8_t fedId, uint32_t errWord, uint32_t errorType, const SiPixelROCsStatusAndMapping *cablingMap) { uint32_t rID = 0xffffffff; switch (errorType) { case 25: case 30: case 31: case 36: case 40: { uint32_t roc = 1; uint32_t link = sipixelconstants::getLink(errWord); uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } case 29: { int chanNmbr = 0; const int DB0_shift = 0; const int DB1_shift = DB0_shift + 1; const int DB2_shift = DB1_shift + 1; const int DB3_shift = DB2_shift + 1; const int DB4_shift = DB3_shift + 1; const uint32_t DataBit_mask = ~(~uint32_t(0) << 1); int CH1 = (errWord >> DB0_shift) & DataBit_mask; int CH2 = (errWord >> DB1_shift) & DataBit_mask; int CH3 = (errWord >> DB2_shift) & DataBit_mask; int CH4 = (errWord >> DB3_shift) & DataBit_mask; int CH5 = (errWord >> DB4_shift) & DataBit_mask; int BLOCK_bits = 3; int BLOCK_shift = 8; uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits); int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask; int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5; if (BLOCK % 2 == 0) chanNmbr = (BLOCK / 2) * 9 + localCH; else chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH; if ((chanNmbr < 1) || (chanNmbr > 36)) break; // signifies unexpected result uint32_t roc = 1; uint32_t link = chanNmbr; uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } case 37: case 38: { uint32_t roc = sipixelconstants::getROC(errWord); uint32_t link = sipixelconstants::getLink(errWord); uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId; if (rID_temp != gpuClustering::invalidModuleId) rID = rID_temp; break; } default: break; }; return rID; } // Kernel to perform Raw to Digi conversion template <bool debug = false> __global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap, const unsigned char *modToUnp, const uint32_t wordCounter, const uint32_t *word, const uint8_t *fedIds, SiPixelDigisCUDASOAView digisView, cms::cuda::SimpleVector<SiPixelErrorCompact> *err, bool useQualityInfo, bool includeErrors) { //if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end); int32_t first = threadIdx.x + blockIdx.x * blockDim.x; for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) { auto gIndex = iloop; auto dvgi = digisView[gIndex]; dvgi.xx() = 0; dvgi.yy() = 0; dvgi.adc() = 0; bool skipROC = false; uint8_t fedId = fedIds[gIndex / 2]; // +1200; // initialize (too many coninue below) dvgi.pdigi() = 0; dvgi.rawIdArr() = 0; dvgi.moduleId() = gpuClustering::invalidModuleId; uint32_t ww = word[gIndex]; // Array containing 32 bit raw data if (ww == 0) { // 0 is an indicator of a noise/dead channel, skip these pixels during clusterization continue; } uint32_t link = sipixelconstants::getLink(ww); // Extract link uint32_t roc = sipixelconstants::getROC(ww); // Extract ROC in link uint8_t errorType = checkROC<debug>(ww, fedId, link, cablingMap); skipROC = (roc < pixelgpudetails::maxROCIndex) ? 
false : (errorType != 0); if (includeErrors and skipROC) { uint32_t rID = getErrRawID<debug>(fedId, ww, errorType, cablingMap); err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId}); continue; } // check for spurious channels if (roc > MAX_ROC or link > MAX_LINK) { if constexpr (debug) { printf("spurious roc %d found on link %d, detector %d (index %d)\n", roc, link, getRawId(cablingMap, fedId, link, 1).rawId, gIndex); } continue; } uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc; if (useQualityInfo) { skipROC = cablingMap->badRocs[index]; if (skipROC) continue; } skipROC = modToUnp[index]; if (skipROC) continue; pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc); uint32_t rawId = detId.rawId; uint32_t layer = 0; int side = 0, panel = 0, module = 0; bool barrel = isBarrel(rawId); if (barrel) { layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask; module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask; side = (module < 5) ? -1 : 1; } else { // endcap ids layer = 0; panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask; side = (panel == 1) ? -1 : 1; } // ***special case of layer to 1 be handled here pixelgpudetails::Pixel localPix; if (layer == 1) { uint32_t col = sipixelconstants::getCol(ww); uint32_t row = sipixelconstants::getRow(ww); localPix.row = row; localPix.col = col; if (includeErrors) { if (not rocRowColIsValid(row, col)) { uint8_t error = conversionError<debug>(fedId, 3); //use the device function and fill the arrays err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId}); if constexpr (debug) printf("BPIX1 Error status: %i\n", error); continue; } } } else { // ***conversion rules for dcol and pxid uint32_t dcol = sipixelconstants::getDCol(ww); uint32_t pxid = sipixelconstants::getPxId(ww); uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2; uint32_t col = dcol * 2 + pxid % 2; localPix.row = row; localPix.col = col; if (includeErrors and not dcolIsValid(dcol, pxid)) { uint8_t error = conversionError<debug>(fedId, 3); err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId}); if constexpr (debug) printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc); continue; } } pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, detId.rocInDet, localPix); dvgi.xx() = globalPix.row; // origin shifting by 1 0-159 dvgi.yy() = globalPix.col; // origin shifting by 1 0-415 dvgi.adc() = sipixelconstants::getADC(ww); dvgi.pdigi() = pixelgpudetails::pack(globalPix.row, globalPix.col, dvgi.adc()); dvgi.moduleId() = detId.moduleId; dvgi.rawIdArr() = rawId; } // end of loop (gIndex < end) } // end of Raw to Digi kernel template <typename TrackerTraits> __global__ void fillHitsModuleStart(uint32_t const *__restrict__ clusInModule, uint32_t *__restrict__ moduleStart, uint32_t const *__restrict__ nModules, uint32_t *__restrict__ nModules_Clusters) { constexpr int nMaxModules = TrackerTraits::numberOfModules; constexpr int startBPIX2 = TrackerTraits::layerStart[1]; constexpr uint32_t maxHitsInModule = TrackerTraits::maxHitsInModule; assert(startBPIX2 < nMaxModules); assert(nMaxModules < 4096); // easy to extend at least till 32*1024 assert(nMaxModules > 1024); assert(1 == gridDim.x); assert(0 == blockIdx.x); int first = threadIdx.x; // limit to MaxHitsInModule; for (int i = first, iend = nMaxModules; i < iend; i += blockDim.x) { moduleStart[i + 1] = std::min(maxHitsInModule, clusInModule[i]); } constexpr bool isPhase2 = 
std::is_base_of<pixelTopology::Phase2, TrackerTraits>::value; __shared__ uint32_t ws[32]; cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws); constexpr int lastModules = isPhase2 ? 1024 : nMaxModules - 1024; cms::cuda::blockPrefixScan(moduleStart + 1024 + 1, moduleStart + 1024 + 1, lastModules, ws); if constexpr (isPhase2) { cms::cuda::blockPrefixScan(moduleStart + 2048 + 1, moduleStart + 2048 + 1, 1024, ws); cms::cuda::blockPrefixScan(moduleStart + 3072 + 1, moduleStart + 3072 + 1, nMaxModules - 3072, ws); } for (int i = first + 1025, iend = isPhase2 ? 2049 : nMaxModules + 1; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[1024]; } __syncthreads(); if constexpr (isPhase2) { for (int i = first + 2049, iend = 3073; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[2048]; } __syncthreads(); for (int i = first + 3073, iend = nMaxModules + 1; i < iend; i += blockDim.x) { moduleStart[i] += moduleStart[3072]; } __syncthreads(); } if (threadIdx.x == 0) { // copy the number of modules nModules_Clusters[0] = *nModules; // last element holds the number of all clusters nModules_Clusters[1] = moduleStart[nMaxModules]; // element 96 is the start of BPIX2 (i.e. the number of clusters in BPIX1) nModules_Clusters[2] = moduleStart[startBPIX2]; } #ifdef GPU_DEBUG uint16_t maxH = isPhase2 ? 3027 : 1024; assert(0 == moduleStart[0]); auto c0 = std::min(maxHitsInModule, clusInModule[0]); assert(c0 == moduleStart[1]); assert(moduleStart[maxH] >= moduleStart[maxH - 1]); assert(moduleStart[maxH + 1] >= moduleStart[maxH]); assert(moduleStart[nMaxModules] >= moduleStart[maxH + 1]); constexpr int startFP1 = TrackerTraits::numberOfModulesInBarrel; constexpr int startLastFwd = TrackerTraits::layerStart[TrackerTraits::numberOfLayers]; for (int i = first, iend = nMaxModules + 1; i < iend; i += blockDim.x) { if (0 != i) assert(moduleStart[i] >= moduleStart[i - i]); // [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID] // [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856] if (i == startBPIX2 || i == startFP1 || i == startLastFwd || i == nMaxModules) printf("moduleStart %d %d\n", i, moduleStart[i]); } #endif } // Interface to outside template <typename TrackerTraits> void SiPixelRawToClusterGPUKernel<TrackerTraits>::makePhase1ClustersAsync( bool isRun2, const SiPixelClusterThresholds clusterThresholds, const SiPixelROCsStatusAndMapping *cablingMap, const unsigned char *modToUnp, const SiPixelGainForHLTonGPU *gains, const WordFedAppender &wordFed, SiPixelFormatterErrors &&errors, const uint32_t wordCounter, const uint32_t fedCounter, bool useQualityInfo, bool includeErrors, bool debug, cudaStream_t stream) { // we're not opting for calling this function in case of early events assert(wordCounter != 0); nDigis = wordCounter; #ifdef GPU_DEBUG std::cout << "decoding " << wordCounter << " digis." 
<< std::endl; #endif // since wordCounter != 0 we're not allocating 0 bytes, // digis_d = SiPixelDigisCUDA(wordCounter, stream); digis_d = SiPixelDigisCUDA(size_t(wordCounter), stream); if (includeErrors) { digiErrors_d = SiPixelDigiErrorsCUDA(wordCounter, std::move(errors), stream); } clusters_d = SiPixelClustersCUDA(TrackerTraits::numberOfModules, stream); // Begin Raw2Digi block { const int threadsPerBlock = 512; const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all assert(0 == wordCounter % 2); // wordCounter is the total no of words in each event to be trasfered on device auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream); auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream); cudaCheck( cudaMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream)); cudaCheck(cudaMemcpyAsync( fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream)); // Launch rawToDigi kernel if (debug) RawToDigi_kernel<true><<<blocks, threadsPerBlock, 0, stream>>>( // cablingMap, modToUnp, wordCounter, word_d.get(), fedId_d.get(), digis_d.view(), digiErrors_d.error(), // returns nullptr if default-constructed useQualityInfo, includeErrors); else RawToDigi_kernel<false><<<blocks, threadsPerBlock, 0, stream>>>( // cablingMap, modToUnp, wordCounter, word_d.get(), fedId_d.get(), digis_d.view(), digiErrors_d.error(), // returns nullptr if default-constructed useQualityInfo, includeErrors); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); #endif if (includeErrors) { digiErrors_d.copyErrorToHostAsync(stream); } } // End of Raw2Digi and passing data for clustering { // clusterizer ... 
using namespace gpuClustering; int threadsPerBlock = 256; int blocks = (std::max(int(wordCounter), int(TrackerTraits::numberOfModules)) + threadsPerBlock - 1) / threadsPerBlock; gpuCalibPixel::calibDigis<<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds, digis_d.view().moduleId(), digis_d.view().xx(), digis_d.view().yy(), digis_d.view().adc(), gains, wordCounter, clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->clusModuleStart()); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); #endif #ifdef GPU_DEBUG std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif countModules<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>( digis_d->moduleId(), clusters_d->moduleStart(), digis_d->clus(), wordCounter); cudaCheck(cudaGetLastError()); threadsPerBlock = ((TrackerTraits::maxPixInModule / 16 + 128 - 1) / 128) * 128; /// should be larger than maxPixInModule/16 aka (maxPixInModule/maxiter in the kernel) blocks = TrackerTraits::numberOfModules; #ifdef GPU_DEBUG std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif findClus<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>(digis_d->rawIdArr(), digis_d->moduleId(), digis_d->xx(), digis_d->yy(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), wordCounter); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); #endif // apply charge cut clusterChargeCut<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds, digis_d->moduleId(), digis_d->adc(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), wordCounter); cudaCheck(cudaGetLastError()); // count the module start indices already here (instead of // rechits) so that the number of clusters/hits can be made // available in the rechit producer without additional points of // synchronization/ExternalWork auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream); // MUST be ONE block fillHitsModuleStart<TrackerTraits><<<1, 1024, 0, stream>>>(clusters_d->clusInModule(), clusters_d->clusModuleStart(), clusters_d->moduleStart(), nModules_Clusters_d.get()); // copy to host nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream); cudaCheck(cudaMemcpyAsync( nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), cudaMemcpyDefault, stream)); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); #endif } // end clusterizer scope } template <typename TrackerTraits> void SiPixelRawToClusterGPUKernel<TrackerTraits>::makePhase2ClustersAsync( const SiPixelClusterThresholds clusterThresholds, const uint16_t *moduleIds, const uint16_t *xDigis, const uint16_t *yDigis, const uint16_t *adcDigis, const uint32_t *packedData, const uint32_t *rawIds, const uint32_t numDigis, cudaStream_t stream) { using namespace gpuClustering; nDigis = numDigis; digis_d = SiPixelDigisCUDA(numDigis, stream); cudaCheck(cudaMemcpyAsync(digis_d->moduleId(), moduleIds, sizeof(uint16_t) * numDigis, cudaMemcpyDefault, stream)); cudaCheck(cudaMemcpyAsync(digis_d->xx(), xDigis, sizeof(uint16_t) * numDigis, cudaMemcpyDefault, stream)); cudaCheck(cudaMemcpyAsync(digis_d->yy(), yDigis, sizeof(uint16_t) * numDigis, cudaMemcpyDefault, stream)); cudaCheck(cudaMemcpyAsync(digis_d->adc(), adcDigis, sizeof(uint16_t) * numDigis, cudaMemcpyDefault, 
stream)); cudaCheck(cudaMemcpyAsync(digis_d->pdigi(), packedData, sizeof(uint32_t) * numDigis, cudaMemcpyDefault, stream)); cudaCheck(cudaMemcpyAsync(digis_d->rawIdArr(), rawIds, sizeof(uint32_t) * numDigis, cudaMemcpyDefault, stream)); clusters_d = SiPixelClustersCUDA(TrackerTraits::numberOfModules, stream); nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream); int threadsPerBlock = 512; int blocks = (int(numDigis) + threadsPerBlock - 1) / threadsPerBlock; gpuCalibPixel::calibDigisPhase2<<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds, digis_d->moduleId(), digis_d->adc(), numDigis, clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->clusModuleStart()); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif countModules<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>( digis_d->moduleId(), clusters_d->moduleStart(), digis_d->clus(), numDigis); cudaCheck(cudaGetLastError()); // read the number of modules into a data member, used by getProduct()) cudaCheck(cudaMemcpyAsync( &(nModules_Clusters_h[0]), clusters_d->moduleStart(), sizeof(uint32_t), cudaMemcpyDefault, stream)); threadsPerBlock = 256; blocks = TrackerTraits::numberOfModules; #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif findClus<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>(digis_d->rawIdArr(), digis_d->moduleId(), digis_d->xx(), digis_d->yy(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), numDigis); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); std::cout << "CUDA clusterChargeCut kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n"; #endif // apply charge cut clusterChargeCut<TrackerTraits><<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds, digis_d->moduleId(), digis_d->adc(), clusters_d->moduleStart(), clusters_d->clusInModule(), clusters_d->moduleId(), digis_d->clus(), numDigis); cudaCheck(cudaGetLastError()); auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream); // MUST be ONE block #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); std::cout << "CUDA fillHitsModuleStart kernel launch \n"; #endif fillHitsModuleStart<TrackerTraits><<<1, 1024, 0, stream>>>(clusters_d->clusInModule(), clusters_d->clusModuleStart(), clusters_d->moduleStart(), nModules_Clusters_d.get()); nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream); cudaCheck(cudaMemcpyAsync( nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), cudaMemcpyDefault, stream)); #ifdef GPU_DEBUG cudaCheck(cudaStreamSynchronize(stream)); #endif } // template class SiPixelRawToClusterGPUKernel<pixelTopology::Phase1>; template class SiPixelRawToClusterGPUKernel<pixelTopology::Phase2>; template class SiPixelRawToClusterGPUKernel<pixelTopology::HIonPhase1>; } // namespace pixelgpudetails
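// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original files): a
// minimal CPU reference for what fillHitsModuleStart computes on the device.
// The per-module cluster counts are first clamped to maxHitsInModule, and
// moduleStart then holds their exclusive prefix sum, so moduleStart[m] is the
// index of the first cluster of module m and moduleStart[nModules] is the
// total cluster count (nModules_Clusters[1] above). The helper name
// referenceModuleStart is hypothetical, not a CMSSW API; it could be used,
// for example, to cross-check the GPU result in a unit test.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <vector>

inline std::vector<uint32_t> referenceModuleStart(const std::vector<uint32_t>& clusInModule,
                                                  uint32_t maxHitsInModule) {
  std::vector<uint32_t> moduleStart(clusInModule.size() + 1, 0);
  for (size_t i = 0; i < clusInModule.size(); ++i) {
    // same clamping as the device kernel: no module contributes more than maxHitsInModule
    moduleStart[i + 1] = moduleStart[i] + std::min(maxHitsInModule, clusInModule[i]);
  }
  return moduleStart;  // moduleStart.back() == total number of clusters kept
}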
e5eed3238a3baf1c19be1ad1e9ea6d6792136913.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/TensorUtils.h> #include <ATen/core/TensorAccessor.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <ATen/hip/HIPGraphsUtils.cuh> #include <THH/THHAtomics.cuh> #include <limits> #include <mutex> #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "fbgemm_gpu/dispatch_macros.h" #include "fbgemm_gpu/fbgemm_cuda_utils.cuh" #include "split_embeddings_utils.cuh" constexpr size_t kCacheMaxThreads = 512; using namespace at; using namespace fbgemm_gpu; // TODO: do we care about 64-bit indices? Currently we just ignore. __host__ DEVICE_INLINE uint32_t cache_slot(int32_t h_in, int32_t C) { // MurmorHash3 32-bit mixing function. uint32_t h = (uint32_t)h_in; h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ return ((uint64_t)h * (uint64_t)C) >> 32; } __host__ DEVICE_INLINE uint32_t cache_slot(int64_t h_in, int32_t C) { // MurmurHash3 64-bit mixing function. uint64_t h = (uint64_t)h_in; h ^= h >> 33; h *= 0xff51afd7ed558ccd; h ^= h >> 33; h *= 0xc4ceb9fe1a85ec53; h ^= h >> 33; return h % (uint32_t)C; } int64_t host_lxu_cache_slot(int64_t h_in, int64_t C) { return static_cast<int64_t>(cache_slot(h_in, static_cast<int32_t>(C))); } constexpr int32_t kCacheLocationMissing = -1; constexpr int64_t kCacheStateInvalid = -1; template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kMaxThreads) void lxu_cache_flush_kernel( PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t B = lxu_cache_weights.size(0); int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } int32_t slot = b % kWarpSize; int32_t cache_set = b / kWarpSize; int64_t current_idx = lxu_cache_state[cache_set][slot]; if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) { // evict from slot to backing storage int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[b][0], D_current, 
nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x, &state); weight_row.set_stoc_state(&state); } float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = thrust_find_qparams<cache_t>(&lxu_cache_weights[b][0], D_current); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict(cache_weights_vec, d * 4, qparams); } } } void lxu_cache_flush_cuda( Tensor uvm_weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, int64_t total_D, Tensor lxu_cache_state, Tensor lxu_cache_weights, bool stochastic_rounding) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(lxu_cache_weights.get_device()); int32_t T = D_offsets.numel() - 1; int32_t S = lxu_cache_weights.size(0); int32_t tx = std::min<int32_t>(total_D / 4 / T, kMaxThreads); dim3 threads(tx, kMaxThreads / tx); dim3 blocks(div_round_up(S, kMaxThreads / tx)); DISPATCH_EMB_CACHE_TYPES( uvm_weights.type(), lxu_cache_weights.type(), "lxu_cache_flush_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && std::is_same<emb_t, Half>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } hipLaunchKernelGGL(( lxu_cache_flush_kernel<emb_t, cache_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_HIP_KERNEL_LAUNCH_CHECK(); return; } __global__ __launch_bounds__(kMaxThreads) void linearize_cache_indices_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> indices, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets, PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> linear_cache_indices) { int32_t T = cache_hash_size_cumsum.size(0) - 1; int64_t total_cache_hash_size = cache_hash_size_cumsum[T]; int32_t B = (offsets.size(0) - 1) / T; int32_t b_t = blockIdx.x * blockDim.x + threadIdx.x; int32_t b = b_t % B; int32_t t = b_t / B; bool valid = t < T; int64_t hash_offset = valid ? cache_hash_size_cumsum[t] : -1; int64_t indices_start = valid ? offsets[t * B + b] : -1; int32_t L = valid ? 
offsets[t * B + b + 1] - indices_start : 0; int32_t lane_id = threadIdx.x % kWarpSize; // hash_offset < 0 for non-caching tables for (int32_t j = 0; j < kWarpSize; ++j) { int64_t indices_start_warp = __shfl_sync(0xFFFFFFFF, indices_start, j); int32_t L_warp = __shfl_sync(0xFFFFFFFF, L, j); int64_t hash_offset_warp = __shfl_sync(0xFFFFFFFF, hash_offset, j); if (hash_offset_warp >= 0) { for (int32_t i = lane_id; i < L_warp; i += kWarpSize) { auto idx = __ldg(&indices[indices_start_warp + i]); linear_cache_indices[indices_start_warp + i] = hash_offset_warp + idx; } } else { for (int32_t i = lane_id; i < L_warp; i += kWarpSize) { linear_cache_indices[indices_start_warp + i] = total_cache_hash_size; } } } } Tensor linearize_cache_indices_cuda( Tensor cache_hash_size_cumsum, Tensor indices, Tensor offsets) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(cache_hash_size_cumsum.get_device()); auto T = cache_hash_size_cumsum.size(0) - 1; TORCH_CHECK(T > 0); // offsets = [B x T + 1] auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto linear_cache_indices = at::empty_like(indices); hipLaunchKernelGGL(( linearize_cache_indices_kernel), dim3(div_round_up(B * T, kMaxThreads)), dim3(kMaxThreads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), cache_hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), linear_cache_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return linear_cache_indices; } std::tuple<Tensor, Tensor, c10::optional<Tensor>> get_unique_indices_cuda( Tensor linear_indices, int64_t max_indices, bool compute_count) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(linear_indices.get_device()); TORCH_CHECK(linear_indices.numel() < std::numeric_limits<int32_t>::max()); int32_t N = linear_indices.numel(); auto sorted_indices = at::empty_like(linear_indices); auto unique_indices = at::empty_like(linear_indices); auto unique_indices_length = at::empty({1}, linear_indices.options().dtype(kInt)); c10::optional<Tensor> unique_indices_count = c10::nullopt; if (compute_count) { unique_indices_count = at::empty( {linear_indices.numel()}, linear_indices.options().dtype(kInt)); } // sort indices size_t temp_storage_bytes_0 = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortKeys( nullptr, temp_storage_bytes_0, linear_indices.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), N, 0, int(log2(float(max_indices + 1)) + 1), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage_0 = at::empty( {static_cast<int64_t>(temp_storage_bytes_0)}, linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortKeys( temp_storage_0.data_ptr(), temp_storage_bytes_0, linear_indices.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), N, 0, int(log2(float(max_indices + 1)) + 1), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); // get unique indices if (compute_count) { size_t temp_storage_bytes_1 = 0; AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_count->data_ptr<int32_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage_1 = at::empty( {static_cast<int64_t>(temp_storage_bytes_1)}, 
linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode( temp_storage_1.data_ptr(), temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_count->data_ptr<int32_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } else { size_t temp_storage_bytes_1 = 0; AT_CUDA_CHECK(hipcub::DeviceSelect::Unique( nullptr, temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage_1 = at::empty( {static_cast<int64_t>(temp_storage_bytes_1)}, linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceSelect::Unique( temp_storage_1.data_ptr(), temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } return std::make_tuple( unique_indices, unique_indices_length, unique_indices_count); } __global__ __launch_bounds__(kMaxThreads) void lru_cache_find_uncached_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, int64_t max_indices, const PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_sets, int64_t time_stamp, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lru_state) { int32_t N = unique_indices.size(0); int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } if (n >= *N_unique) { if (threadIdx.x == 0) { cache_sets[n] = C; // invalid index, used as sentinel } return; } int64_t idx = unique_indices[n]; if (idx == max_indices) { if (threadIdx.x == 0) { cache_sets[n] = C; // invalid index, used as sentinel } return; } int32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx; if (found) { // mark it as existing. cache_sets[n] = C; // invalid index, used as sentinel // mark it as recently accessed so we don't evict. 
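// Note: each lane of the warp probes one slot of this index's cache set; on a
// hit, cache_sets[n] is set to the sentinel C so the later insert pass skips
// this index, and the store below refreshes that slot's LRU timestamp so it is
// not chosen as an eviction victim in this round.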
lru_state[cache_set][slot] = time_stamp; } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { cache_sets[n] = cache_set; } } } std::pair<Tensor, Tensor> lru_cache_find_uncached_cuda( Tensor unique_indices, Tensor unique_indices_length, int64_t max_indices, Tensor lxu_cache_state, int64_t time_stamp, Tensor lru_state) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(unique_indices.get_device()); auto cache_sets = empty_like(unique_indices, unique_indices.options().dtype(kInt)); int32_t N = unique_indices.numel(); auto sorted_cache_sets = empty_like(cache_sets); auto cache_set_sorted_unique_indices = empty_like(unique_indices); // Find uncached indices hipLaunchKernelGGL(( lru_cache_find_uncached_kernel), dim3(div_round_up(N, kMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), max_indices, lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), cache_sets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), time_stamp, lru_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); // Sort the cache sets and ids size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, cache_sets.data_ptr<int32_t>(), sorted_cache_sets.data_ptr<int32_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, unique_indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, cache_sets.data_ptr<int32_t>(), sorted_cache_sets.data_ptr<int32_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); return {sorted_cache_sets, cache_set_sorted_unique_indices}; } template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_kernel( PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_cache_sets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_set_sorted_indices, const int32_t* __restrict__ N_unique, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, int64_t time_stamp, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lru_state, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= *N_unique) { return; } // check if this warp is responsible for this whole segment. 
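// Note: after the radix sort, all misses that hash to the same cache set form
// one contiguous run in sorted_cache_sets. Only the warp that lands on the
// first element of a run (segment_start below) performs the evictions and
// insertions for that whole set; every other warp in the run returns at once.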
bool segment_start = (n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } int32_t cache_set = sorted_cache_sets[n]; if (cache_set == C) { // ignore the already-existing elements return; } int32_t SL = 1; while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) { SL += 1; } // now, we need to insert the (unique!) values in indices[n:n + SL] into // our slots. int32_t slot = threadIdx.x; int64_t slot_time = lru_state[cache_set][slot]; int64_t costs[1] = {slot_time}; int32_t slots[1] = {slot}; BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots); int32_t sorted_slot = slots[0]; int64_t sorted_lru_cost = costs[0]; for (int32_t l = 0; l < min(SL, kWarpSize); ++l) { int32_t insert_slot = __shfl_sync(0xFFFFFFFF, sorted_slot, l); int64_t insert_current_lru_cost = __shfl_sync(0xFFFFFFFF, sorted_lru_cost, l); if (insert_current_lru_cost == time_stamp) { return; } int64_t insert_idx = cache_set_sorted_indices[n + l]; int32_t t_insert = cache_index_table_map[insert_idx]; int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert]; int64_t weights_offset_insert = weights_offsets[t_insert]; int32_t D_start_insert = D_offsets[t_insert]; int32_t D_end_insert = D_offsets[t_insert + 1]; int32_t D_insert = D_end_insert - D_start_insert; // ensure that threadIdx.x is the only thread reading/writing to // lxu_cache_state int64_t current_idx = threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0; current_idx = __shfl_sync(0xFFFFFFFF, current_idx, 0); // not empty if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) { // evict from slot to backing storage int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_current, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
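// Note: the subsequence handed to stochastic_rounding_init below is the flat
// thread id scaled by kWarpSize plus the loop index l, so each row this warp
// inserts draws from its own random stream even though one warp handles up to
// kWarpSize insertions for the set.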
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), (blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x) * kWarpSize + l, &state); weight_row.set_stoc_state(&state); } float2 qparams; acc_type<cache_t, true> local_min = std::numeric_limits<acc_type<cache_t, true>>::max(); acc_type<cache_t, true> local_max = std::numeric_limits<acc_type<cache_t, true>>::lowest(); if (std::is_same<emb_t, uint8_t>::value) { for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); // qparams not used local_max = max(local_max, vec4_max(cache_weights_vec)); local_min = min(local_min, vec4_min(cache_weights_vec)); } qparams = warp_find_qparams(local_min, local_max); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict( cache_weights_vec, d * 4, qparams); // FP32 -> FP16/FP32 } } int32_t D_emb = D_insert; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } // insert into cache auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_insert, nullptr); auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], nullptr, D_insert, nullptr); float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = weight_row_emb.load_qparams(); } for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) { auto row = weight_row_emb.load(d * 4, qparams); weight_row_cache.store(row, d * 4, qparams); } if (threadIdx.x == 0) { lxu_cache_state[cache_set][insert_slot] = insert_idx; lru_state[cache_set][insert_slot] = time_stamp; } } } void lru_cache_insert_cuda( Tensor weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor sorted_cache_sets, Tensor cache_set_sorted_unique_indices, Tensor unique_indices_length, Tensor lxu_cache_state, Tensor lxu_cache_weights, int64_t time_stamp, Tensor lru_state, bool stochastic_rounding) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(weights.get_device()); int32_t N = cache_set_sorted_unique_indices.numel(); DISPATCH_EMB_CACHE_TYPES( weights.type(), lxu_cache_weights.type(), "lru_cache_insert_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } hipLaunchKernelGGL(( lru_cache_insert_kernel<emb_t, cache_t>) , dim3(div_round_up(N, kMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_cache_sets 
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), cache_set_sorted_unique_indices .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), time_stamp, lru_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } void lru_cache_populate_cuda( Tensor weights, Tensor cache_hash_size_cumsum, int64_t total_cache_hash_size, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor linear_cache_indices, Tensor lxu_cache_state, Tensor lxu_cache_weights, int64_t time_stamp, Tensor lru_state, bool stochastic_rounding) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(weights.get_device()); TORCH_CHECK( linear_cache_indices.numel() < std::numeric_limits<int32_t>::max()); if (linear_cache_indices.numel() == 0) { // nothing to do return; } // Get unqiue indices Tensor unique_indices; Tensor unique_indices_length; c10::optional<Tensor> unique_indices_count; std::tie(unique_indices, unique_indices_length, unique_indices_count) = get_unique_indices_cuda( linear_cache_indices, total_cache_hash_size, false); // Find uncached indices auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda( unique_indices, unique_indices_length, total_cache_hash_size, lxu_cache_state, time_stamp, lru_state); auto sorted_cache_sets = cache_sets_and_unique_indices.first; auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second; // insert caching weights lru_cache_insert_cuda( weights, cache_hash_size_cumsum, cache_index_table_map, weights_offsets, D_offsets, sorted_cache_sets, cache_set_sorted_unique_indices, unique_indices_length, lxu_cache_state, lxu_cache_weights, time_stamp, lru_state, stochastic_rounding); } __global__ __launch_bounds__(kMaxThreads) void lfu_update_counts_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> unique_indices_count, PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= *N_unique) { return; } int64_t idx = unique_indices[n]; lfu_state[idx] += unique_indices_count[n]; } void lfu_update_counts_cuda( Tensor unique_indices, Tensor unique_indices_length, Tensor unique_indices_count, Tensor lfu_state) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(unique_indices.get_device()); int32_t N = unique_indices.size(0); hipLaunchKernelGGL(( lfu_update_counts_kernel), dim3(div_round_up(N, kMaxThreads)), dim3(kMaxThreads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), unique_indices_count.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } constexpr int32_t kCacheSetBits = 24; constexpr int32_t kLFUCounterBits = 40; static_assert(kCacheSetBits + kLFUCounterBits == 8 * sizeof(int64_t), ""); __global__ __launch_bounds__(kMaxThreads) void lfu_cache_find_uncached_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, int64_t max_indices, const 
PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, uint64_t* __restrict__ cache_sets, const PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state) { int32_t N = unique_indices.size(0); int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } if (n >= *N_unique) { if (threadIdx.x == 0) { cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } return; } int64_t idx = unique_indices[n]; if (idx == max_indices) { if (threadIdx.x == 0) { cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } return; } uint32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx; if (found) { // mark it as existing. cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { // sort so the highest LFUs come first in the segment. // assume lfu_state[idx] <= 2^40 - 1 and cache_set < 2^24 -1 cache_sets[n] = ((static_cast<uint64_t>(cache_set) << kLFUCounterBits)) | ((static_cast<uint64_t>(1) << kLFUCounterBits) - 1 - lfu_state[idx]); } } } std::pair<Tensor, Tensor> lfu_cache_find_uncached_cuda( Tensor unique_indices, Tensor unique_indices_length, int64_t max_indices, Tensor lxu_cache_state, Tensor lfu_state) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(unique_indices.get_device()); auto cache_sets = empty_like(unique_indices, unique_indices.options().dtype(kLong)); int32_t N = unique_indices.numel(); auto sorted_cache_sets = empty_like(cache_sets); auto cache_set_sorted_unique_indices = empty_like(unique_indices); // Find uncached indices hipLaunchKernelGGL(( lfu_cache_find_uncached_kernel), dim3(div_round_up(N, kMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), max_indices, lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), (uint64_t*)cache_sets.data_ptr<int64_t>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); // Sort the cache sets and ids size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, (uint64_t*)cache_sets.data_ptr<int64_t>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, unique_indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, (uint64_t*)cache_sets.data_ptr<int64_t>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); return {sorted_cache_sets, cache_set_sorted_unique_indices}; } template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_kernel( PackedTensorAccessor64<emb_t, 1, 
RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const uint64_t* __restrict__ sorted_cache_sets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_set_sorted_indices, const int32_t* __restrict__ N_unique, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= *N_unique) { return; } // check if this warp is responsible for this whole segment. bool segment_start = (n == 0 || (sorted_cache_sets[n - 1] >> kLFUCounterBits) != (sorted_cache_sets[n] >> kLFUCounterBits)); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits); if (cache_set == C) { // ignore the already-existing elements return; } int32_t SL = 1; while (n + SL < *N_unique && (sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) { SL += 1; } // now, we need to insert the (unique!) values in indices[n:n + SL] into // our slots. int32_t slot = threadIdx.x; int64_t current_idx = lxu_cache_state[cache_set][slot]; int64_t current_lfu_cost = (current_idx != static_cast<int64_t>(kCacheStateInvalid)) ? lfu_state[current_idx] : -1; int64_t costs[1] = {current_lfu_cost}; int32_t slots[1] = {slot}; BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots); int32_t sorted_slot = slots[0]; int64_t sorted_lfu_cost = costs[0]; for (int32_t l = 0; l < min(SL, kWarpSize); ++l) { int32_t insert_slot = __shfl_sync(0xFFFFFFFF, sorted_slot, l); int64_t insert_current_lfu_cost = __shfl_sync(0xFFFFFFFF, sorted_lfu_cost, l); int64_t insert_idx = cache_set_sorted_indices[n + l]; int64_t insert_lfu_cost = lfu_state[insert_idx]; if (insert_current_lfu_cost > insert_lfu_cost) { // don't insert. // all subsequent `current_lfu_cost` values are greater, and all // subsequent `insert_lfu_cost` values are smaller, so we can exit // early here. return; } int32_t t_insert = cache_index_table_map[insert_idx]; int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert]; int64_t weights_offset_insert = weights_offsets[t_insert]; int32_t D_start_insert = D_offsets[t_insert]; int32_t D_end_insert = D_offsets[t_insert + 1]; int32_t D_insert = D_end_insert - D_start_insert; // not empty if (insert_current_lfu_cost != -1) { // ensure that threadIdx.x is the only thread reading/writing to // lxu_cache_state int64_t current_idx = threadIdx.x == 0 ? 
lxu_cache_state[cache_set][insert_slot] : 0; current_idx = __shfl_sync(0xFFFFFFFF, current_idx, 0); int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_current, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), (blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x) * kWarpSize + l, &state); weight_row.set_stoc_state(&state); } float2 qparams; acc_type<cache_t, true> local_min = std::numeric_limits<acc_type<cache_t, true>>::max(); acc_type<cache_t, true> local_max = std::numeric_limits<acc_type<cache_t, true>>::lowest(); if (std::is_same<emb_t, uint8_t>::value) { for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); // qparams not used local_max = max(local_max, vec4_max(cache_weights_vec)); local_min = min(local_min, vec4_min(cache_weights_vec)); } qparams = warp_find_qparams(local_min, local_max); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict(cache_weights_vec, d * 4, qparams); } } // insert into cache int32_t D_emb = D_insert; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_insert, nullptr); auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], nullptr, D_insert, nullptr); float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = weight_row_emb.load_qparams(); } for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) { auto row = weight_row_emb.load(d * 4, qparams); weight_row_cache.store(row, d * 4, qparams); } if (threadIdx.x == 0) { lxu_cache_state[cache_set][insert_slot] = insert_idx; } } } void lfu_cache_insert_cuda( Tensor weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor sorted_cache_sets, Tensor cache_set_sorted_unique_indices, Tensor unique_indices_length, Tensor lxu_cache_state, Tensor lxu_cache_weights, Tensor lfu_state, bool stochastic_rounding) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(weights.get_device()); int32_t N = cache_set_sorted_unique_indices.numel(); DISPATCH_EMB_CACHE_TYPES( weights.type(), lxu_cache_weights.type(), "lfu_cache_insert_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { 
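// Note: as in the LRU path, a Philox counter segment is reserved under the
// generator mutex only when the embedding type is reduced precision and
// stochastic rounding is requested; otherwise the kernel receives a
// default-constructed PhiloxCudaState that it never unpacks.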
auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } hipLaunchKernelGGL(( lfu_cache_insert_kernel<emb_t, cache_t>) , dim3(div_round_up(N, kCacheMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kCacheMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), cache_set_sorted_unique_indices .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } void lfu_cache_populate_cuda( Tensor weights, Tensor cache_hash_size_cumsum, int64_t total_cache_hash_size, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor linear_cache_indices, Tensor lxu_cache_state, Tensor lxu_cache_weights, Tensor lfu_state, bool stochastic_rounding) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(weights.get_device()); TORCH_CHECK( linear_cache_indices.numel() < std::numeric_limits<int32_t>::max()); if (linear_cache_indices.numel() == 0) { // nothing to do return; } // get unqiue indices Tensor unique_indices; Tensor unique_indices_length; c10::optional<Tensor> unique_indices_count; std::tie(unique_indices, unique_indices_length, unique_indices_count) = get_unique_indices_cuda( linear_cache_indices, total_cache_hash_size, true); // update lfu counts lfu_update_counts_cuda( unique_indices, unique_indices_length, *unique_indices_count, lfu_state); // find uncached indices auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda( unique_indices, unique_indices_length, total_cache_hash_size, lxu_cache_state, lfu_state); auto sorted_cache_sets = cache_sets_and_unique_indices.first; auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second; // insert caching weights lfu_cache_insert_cuda( weights, cache_hash_size_cumsum, cache_index_table_map, weights_offsets, D_offsets, sorted_cache_sets, cache_set_sorted_unique_indices, unique_indices_length, lxu_cache_state, lxu_cache_weights, lfu_state, stochastic_rounding); } __global__ __launch_bounds__(kMaxThreads) void lxu_cache_lookup_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> linear_cache_indices, const PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> lxu_cache_locations) { const int32_t C = lxu_cache_state.size(0); const int32_t N = linear_cache_indices.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } int64_t idx = linear_cache_indices[n]; int32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = (__ldg((&lxu_cache_state[cache_set][0]) + slot) == idx); if (found) { lxu_cache_locations[n] = cache_set * kWarpSize + slot; } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { 
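// Note: no lane in the warp matched idx anywhere in the set, so lane 0 records
// the miss sentinel below; hits were already written above as
// cache_set * kWarpSize + slot, which is exactly the row offset into
// lxu_cache_weights.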
lxu_cache_locations[n] = kCacheLocationMissing; } } } Tensor lxu_cache_lookup_cuda( Tensor linear_cache_indices, Tensor lxu_cache_state) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(linear_cache_indices.get_device()); const auto N = linear_cache_indices.numel(); auto lxu_cache_locations = empty_like( linear_cache_indices, linear_cache_indices.options().dtype(kInt)); if (linear_cache_indices.numel() == 0) { // nothing to do return lxu_cache_locations; } const dim3 threads(kWarpSize, kMaxThreads / kWarpSize); const dim3 blocks(div_round_up(N, kMaxThreads / kWarpSize)); hipLaunchKernelGGL(( lxu_cache_lookup_kernel), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), linear_cache_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_locations.packed_accessor32<int32_t, 1, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return lxu_cache_locations; }
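// ---------------------------------------------------------------------------
// The LFU path above packs each miss into a single 64-bit radix-sort key,
// (cache_set << kLFUCounterBits) | (2^kLFUCounterBits - 1 - access_count), so
// one ascending sort groups candidates by set and orders them most-frequent
// first within each set. The host-only snippet below is a minimal illustration
// of that ordering property (added for clarity, not part of the original
// kernels); it re-creates the same 24/40-bit split with plain integers.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

static uint64_t pack_lfu_key_demo(uint32_t cache_set, uint64_t frequency) {
  constexpr int32_t kLFUCounterBitsDemo = 40; // same split as kLFUCounterBits above
  const uint64_t kFreqMask = (uint64_t(1) << kLFUCounterBitsDemo) - 1;
  // larger frequency -> smaller low bits -> sorts earlier within its set
  return (uint64_t(cache_set) << kLFUCounterBitsDemo) |
      (kFreqMask - (frequency & kFreqMask));
}

static void lfu_key_ordering_demo() {
  // two sets, three candidate rows, with different access counts
  std::vector<std::pair<uint64_t, int>> keyed = {
      {pack_lfu_key_demo(/*cache_set=*/7, /*frequency=*/3), 0},
      {pack_lfu_key_demo(7, 900), 1},
      {pack_lfu_key_demo(2, 5), 2},
  };
  std::sort(keyed.begin(), keyed.end());
  // set 2 sorts before set 7; within set 7 the hotter row (freq 900) comes first
  assert(keyed[0].second == 2 && keyed[1].second == 1 && keyed[2].second == 0);
}
// ---------------------------------------------------------------------------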
e5eed3238a3baf1c19be1ad1e9ea6d6792136913.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/TensorUtils.h> #include <ATen/core/TensorAccessor.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include <ATen/cuda/CUDAGraphsUtils.cuh> #include <THC/THCAtomics.cuh> #include <limits> #include <mutex> #include "cub/device/device_radix_sort.cuh" #include "cub/device/device_run_length_encode.cuh" #include "cub/device/device_select.cuh" #include "fbgemm_gpu/dispatch_macros.h" #include "fbgemm_gpu/fbgemm_cuda_utils.cuh" #include "split_embeddings_utils.cuh" constexpr size_t kCacheMaxThreads = 512; using namespace at; using namespace fbgemm_gpu; // TODO: do we care about 64-bit indices? Currently we just ignore. __host__ DEVICE_INLINE uint32_t cache_slot(int32_t h_in, int32_t C) { // MurmorHash3 32-bit mixing function. uint32_t h = (uint32_t)h_in; h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ return ((uint64_t)h * (uint64_t)C) >> 32; } __host__ DEVICE_INLINE uint32_t cache_slot(int64_t h_in, int32_t C) { // MurmurHash3 64-bit mixing function. uint64_t h = (uint64_t)h_in; h ^= h >> 33; h *= 0xff51afd7ed558ccd; h ^= h >> 33; h *= 0xc4ceb9fe1a85ec53; h ^= h >> 33; return h % (uint32_t)C; } int64_t host_lxu_cache_slot(int64_t h_in, int64_t C) { return static_cast<int64_t>(cache_slot(h_in, static_cast<int32_t>(C))); } constexpr int32_t kCacheLocationMissing = -1; constexpr int64_t kCacheStateInvalid = -1; template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kMaxThreads) void lxu_cache_flush_kernel( PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t B = lxu_cache_weights.size(0); int32_t b = blockIdx.x * blockDim.y + threadIdx.y; if (b >= B) { return; } int32_t slot = b % kWarpSize; int32_t cache_set = b / kWarpSize; int64_t current_idx = lxu_cache_state[cache_set][slot]; if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) { // evict from slot to backing storage int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[b][0], D_current, nullptr); if (!std::is_same<emb_t, float>::value && 
stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x, &state); weight_row.set_stoc_state(&state); } float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = thrust_find_qparams<cache_t>(&lxu_cache_weights[b][0], D_current); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict(cache_weights_vec, d * 4, qparams); } } } void lxu_cache_flush_cuda( Tensor uvm_weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, int64_t total_D, Tensor lxu_cache_state, Tensor lxu_cache_weights, bool stochastic_rounding) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(lxu_cache_weights.get_device()); int32_t T = D_offsets.numel() - 1; int32_t S = lxu_cache_weights.size(0); int32_t tx = std::min<int32_t>(total_D / 4 / T, kMaxThreads); dim3 threads(tx, kMaxThreads / tx); dim3 blocks(div_round_up(S, kMaxThreads / tx)); DISPATCH_EMB_CACHE_TYPES( uvm_weights.type(), lxu_cache_weights.type(), "lxu_cache_flush_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && std::is_same<emb_t, Half>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } lxu_cache_flush_kernel<emb_t, cache_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); return; } __global__ __launch_bounds__(kMaxThreads) void linearize_cache_indices_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> indices, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets, PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> linear_cache_indices) { int32_t T = cache_hash_size_cumsum.size(0) - 1; int64_t total_cache_hash_size = cache_hash_size_cumsum[T]; int32_t B = (offsets.size(0) - 1) / T; int32_t b_t = blockIdx.x * blockDim.x + threadIdx.x; int32_t b = b_t % B; int32_t t = b_t / B; bool valid = t < T; int64_t hash_offset = valid ? cache_hash_size_cumsum[t] : -1; int64_t indices_start = valid ? offsets[t * B + b] : -1; int32_t L = valid ? 
offsets[t * B + b + 1] - indices_start : 0; int32_t lane_id = threadIdx.x % kWarpSize; // hash_offset < 0 for non-caching tables for (int32_t j = 0; j < kWarpSize; ++j) { int64_t indices_start_warp = __shfl_sync(0xFFFFFFFF, indices_start, j); int32_t L_warp = __shfl_sync(0xFFFFFFFF, L, j); int64_t hash_offset_warp = __shfl_sync(0xFFFFFFFF, hash_offset, j); if (hash_offset_warp >= 0) { for (int32_t i = lane_id; i < L_warp; i += kWarpSize) { auto idx = __ldg(&indices[indices_start_warp + i]); linear_cache_indices[indices_start_warp + i] = hash_offset_warp + idx; } } else { for (int32_t i = lane_id; i < L_warp; i += kWarpSize) { linear_cache_indices[indices_start_warp + i] = total_cache_hash_size; } } } } Tensor linearize_cache_indices_cuda( Tensor cache_hash_size_cumsum, Tensor indices, Tensor offsets) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(cache_hash_size_cumsum.get_device()); auto T = cache_hash_size_cumsum.size(0) - 1; TORCH_CHECK(T > 0); // offsets = [B x T + 1] auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto linear_cache_indices = at::empty_like(indices); linearize_cache_indices_kernel<<< div_round_up(B * T, kMaxThreads), kMaxThreads, 0, at::cuda::getCurrentCUDAStream()>>>( cache_hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), linear_cache_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return linear_cache_indices; } std::tuple<Tensor, Tensor, c10::optional<Tensor>> get_unique_indices_cuda( Tensor linear_indices, int64_t max_indices, bool compute_count) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(linear_indices.get_device()); TORCH_CHECK(linear_indices.numel() < std::numeric_limits<int32_t>::max()); int32_t N = linear_indices.numel(); auto sorted_indices = at::empty_like(linear_indices); auto unique_indices = at::empty_like(linear_indices); auto unique_indices_length = at::empty({1}, linear_indices.options().dtype(kInt)); c10::optional<Tensor> unique_indices_count = c10::nullopt; if (compute_count) { unique_indices_count = at::empty( {linear_indices.numel()}, linear_indices.options().dtype(kInt)); } // sort indices size_t temp_storage_bytes_0 = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortKeys( nullptr, temp_storage_bytes_0, linear_indices.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), N, 0, int(log2(float(max_indices + 1)) + 1), at::cuda::getCurrentCUDAStream(), false)); auto temp_storage_0 = at::empty( {static_cast<int64_t>(temp_storage_bytes_0)}, linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortKeys( temp_storage_0.data_ptr(), temp_storage_bytes_0, linear_indices.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), N, 0, int(log2(float(max_indices + 1)) + 1), at::cuda::getCurrentCUDAStream(), false)); // get unique indices if (compute_count) { size_t temp_storage_bytes_1 = 0; AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_count->data_ptr<int32_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage_1 = at::empty( {static_cast<int64_t>(temp_storage_bytes_1)}, linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode( temp_storage_1.data_ptr(), temp_storage_bytes_1, 
sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_count->data_ptr<int32_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::cuda::getCurrentCUDAStream(), false)); } else { size_t temp_storage_bytes_1 = 0; AT_CUDA_CHECK(cub::DeviceSelect::Unique( nullptr, temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage_1 = at::empty( {static_cast<int64_t>(temp_storage_bytes_1)}, linear_indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceSelect::Unique( temp_storage_1.data_ptr(), temp_storage_bytes_1, sorted_indices.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), unique_indices_length.data_ptr<int32_t>(), N, at::cuda::getCurrentCUDAStream(), false)); } return std::make_tuple( unique_indices, unique_indices_length, unique_indices_count); } __global__ __launch_bounds__(kMaxThreads) void lru_cache_find_uncached_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, int64_t max_indices, const PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_sets, int64_t time_stamp, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lru_state) { int32_t N = unique_indices.size(0); int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } if (n >= *N_unique) { if (threadIdx.x == 0) { cache_sets[n] = C; // invalid index, used as sentinel } return; } int64_t idx = unique_indices[n]; if (idx == max_indices) { if (threadIdx.x == 0) { cache_sets[n] = C; // invalid index, used as sentinel } return; } int32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx; if (found) { // mark it as existing. cache_sets[n] = C; // invalid index, used as sentinel // mark it as recently accessed so we don't evict. 
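// Note: lru_state has the same [sets x slots] shape as lxu_cache_state and
// records the last time_stamp at which each slot was requested; the insert
// kernel later refuses to evict any slot whose recorded timestamp equals the
// current time_stamp.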
lru_state[cache_set][slot] = time_stamp; } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { cache_sets[n] = cache_set; } } } std::pair<Tensor, Tensor> lru_cache_find_uncached_cuda( Tensor unique_indices, Tensor unique_indices_length, int64_t max_indices, Tensor lxu_cache_state, int64_t time_stamp, Tensor lru_state) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(unique_indices.get_device()); auto cache_sets = empty_like(unique_indices, unique_indices.options().dtype(kInt)); int32_t N = unique_indices.numel(); auto sorted_cache_sets = empty_like(cache_sets); auto cache_set_sorted_unique_indices = empty_like(unique_indices); // Find uncached indices lru_cache_find_uncached_kernel<<< div_round_up(N, kMaxThreads / kWarpSize), dim3(kWarpSize, kMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), max_indices, lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), cache_sets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), time_stamp, lru_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Sort the cache sets and ids size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, cache_sets.data_ptr<int32_t>(), sorted_cache_sets.data_ptr<int32_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1), at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, unique_indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, cache_sets.data_ptr<int32_t>(), sorted_cache_sets.data_ptr<int32_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1), at::cuda::getCurrentCUDAStream(), false)); return {sorted_cache_sets, cache_set_sorted_unique_indices}; } template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kMaxThreads) void lru_cache_insert_kernel( PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_cache_sets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_set_sorted_indices, const int32_t* __restrict__ N_unique, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, int64_t time_stamp, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lru_state, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= *N_unique) { return; } // check if this warp is responsible for this whole segment. bool segment_start = (n == 0 || sorted_cache_sets[n - 1] != sorted_cache_sets[n]); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. 
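// Note: the warp that owns a segment continues below: it measures the run
// length SL, has every lane contribute its slot's last-access time, and
// bitonic-sorts the (timestamp, slot) pairs so the least-recently-used slots
// are tried first as eviction victims; insertion stops early once a victim
// turns out to have been touched at the current time_stamp.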
return; } int32_t cache_set = sorted_cache_sets[n]; if (cache_set == C) { // ignore the already-existing elements return; } int32_t SL = 1; while (n + SL < *N_unique && sorted_cache_sets[n + SL] == cache_set) { SL += 1; } // now, we need to insert the (unique!) values in indices[n:n + SL] into // our slots. int32_t slot = threadIdx.x; int64_t slot_time = lru_state[cache_set][slot]; int64_t costs[1] = {slot_time}; int32_t slots[1] = {slot}; BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots); int32_t sorted_slot = slots[0]; int64_t sorted_lru_cost = costs[0]; for (int32_t l = 0; l < min(SL, kWarpSize); ++l) { int32_t insert_slot = __shfl_sync(0xFFFFFFFF, sorted_slot, l); int64_t insert_current_lru_cost = __shfl_sync(0xFFFFFFFF, sorted_lru_cost, l); if (insert_current_lru_cost == time_stamp) { return; } int64_t insert_idx = cache_set_sorted_indices[n + l]; int32_t t_insert = cache_index_table_map[insert_idx]; int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert]; int64_t weights_offset_insert = weights_offsets[t_insert]; int32_t D_start_insert = D_offsets[t_insert]; int32_t D_end_insert = D_offsets[t_insert + 1]; int32_t D_insert = D_end_insert - D_start_insert; // ensure that threadIdx.x is the only thread reading/writing to // lxu_cache_state int64_t current_idx = threadIdx.x == 0 ? lxu_cache_state[cache_set][insert_slot] : 0; current_idx = __shfl_sync(0xFFFFFFFF, current_idx, 0); // not empty if (current_idx != static_cast<int64_t>(kCacheStateInvalid)) { // evict from slot to backing storage int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_current, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
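// Note: when emb_t is uint8_t, the eviction below re-derives the row's
// quantization parameters: each lane accumulates the min/max of the cached FP
// values, warp_find_qparams reduces them across the warp, lane 0 stores the
// fresh qparams with the row (the reason D_emb is padded by
// kINT8QparamsBytes), and the Vec4T chunks are then quantized back into the
// backing weights.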
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), (blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x) * kWarpSize + l, &state); weight_row.set_stoc_state(&state); } float2 qparams; acc_type<cache_t, true> local_min = std::numeric_limits<acc_type<cache_t, true>>::max(); acc_type<cache_t, true> local_max = std::numeric_limits<acc_type<cache_t, true>>::lowest(); if (std::is_same<emb_t, uint8_t>::value) { for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); // qparams not used local_max = max(local_max, vec4_max(cache_weights_vec)); local_min = min(local_min, vec4_min(cache_weights_vec)); } qparams = warp_find_qparams(local_min, local_max); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict( cache_weights_vec, d * 4, qparams); // FP32 -> FP16/FP32 } } int32_t D_emb = D_insert; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } // insert into cache auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_insert, nullptr); auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], nullptr, D_insert, nullptr); float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = weight_row_emb.load_qparams(); } for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) { auto row = weight_row_emb.load(d * 4, qparams); weight_row_cache.store(row, d * 4, qparams); } if (threadIdx.x == 0) { lxu_cache_state[cache_set][insert_slot] = insert_idx; lru_state[cache_set][insert_slot] = time_stamp; } } } void lru_cache_insert_cuda( Tensor weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor sorted_cache_sets, Tensor cache_set_sorted_unique_indices, Tensor unique_indices_length, Tensor lxu_cache_state, Tensor lxu_cache_weights, int64_t time_stamp, Tensor lru_state, bool stochastic_rounding) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(weights.get_device()); int32_t N = cache_set_sorted_unique_indices.numel(); DISPATCH_EMB_CACHE_TYPES( weights.type(), lxu_cache_weights.type(), "lru_cache_insert_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } lru_cache_insert_kernel<emb_t, cache_t> <<<div_round_up(N, kMaxThreads / kWarpSize), dim3(kWarpSize, kMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_cache_sets .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), cache_set_sorted_unique_indices 
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), time_stamp, lru_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } void lru_cache_populate_cuda( Tensor weights, Tensor cache_hash_size_cumsum, int64_t total_cache_hash_size, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor linear_cache_indices, Tensor lxu_cache_state, Tensor lxu_cache_weights, int64_t time_stamp, Tensor lru_state, bool stochastic_rounding) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(weights.get_device()); TORCH_CHECK( linear_cache_indices.numel() < std::numeric_limits<int32_t>::max()); if (linear_cache_indices.numel() == 0) { // nothing to do return; } // Get unqiue indices Tensor unique_indices; Tensor unique_indices_length; c10::optional<Tensor> unique_indices_count; std::tie(unique_indices, unique_indices_length, unique_indices_count) = get_unique_indices_cuda( linear_cache_indices, total_cache_hash_size, false); // Find uncached indices auto cache_sets_and_unique_indices = lru_cache_find_uncached_cuda( unique_indices, unique_indices_length, total_cache_hash_size, lxu_cache_state, time_stamp, lru_state); auto sorted_cache_sets = cache_sets_and_unique_indices.first; auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second; // insert caching weights lru_cache_insert_cuda( weights, cache_hash_size_cumsum, cache_index_table_map, weights_offsets, D_offsets, sorted_cache_sets, cache_set_sorted_unique_indices, unique_indices_length, lxu_cache_state, lxu_cache_weights, time_stamp, lru_state, stochastic_rounding); } __global__ __launch_bounds__(kMaxThreads) void lfu_update_counts_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> unique_indices_count, PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state) { int32_t n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= *N_unique) { return; } int64_t idx = unique_indices[n]; lfu_state[idx] += unique_indices_count[n]; } void lfu_update_counts_cuda( Tensor unique_indices, Tensor unique_indices_length, Tensor unique_indices_count, Tensor lfu_state) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(unique_indices.get_device()); int32_t N = unique_indices.size(0); lfu_update_counts_kernel<<< div_round_up(N, kMaxThreads), kMaxThreads, 0, at::cuda::getCurrentCUDAStream()>>>( unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), unique_indices_count.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } constexpr int32_t kCacheSetBits = 24; constexpr int32_t kLFUCounterBits = 40; static_assert(kCacheSetBits + kLFUCounterBits == 8 * sizeof(int64_t), ""); __global__ __launch_bounds__(kMaxThreads) void lfu_cache_find_uncached_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> unique_indices, const int32_t* __restrict__ N_unique, int64_t max_indices, const PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, uint64_t* __restrict__ cache_sets, const PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state) { 
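// Note: unlike the LRU variant, a miss here is encoded as a 64-bit key whose
// upper kCacheSetBits carry the target set and whose lower kLFUCounterBits
// carry (2^40 - 1 - access count), so a single ascending radix sort both
// groups candidates by set and ranks them most-frequent-first within each
// set; hits and out-of-range indices use (C << kLFUCounterBits) as the
// sentinel.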
int32_t N = unique_indices.size(0); int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } if (n >= *N_unique) { if (threadIdx.x == 0) { cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } return; } int64_t idx = unique_indices[n]; if (idx == max_indices) { if (threadIdx.x == 0) { cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } return; } uint32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = __ldg((&lxu_cache_state[cache_set][0]) + slot) == idx; if (found) { // mark it as existing. cache_sets[n] = (static_cast<uint64_t>(C) << kLFUCounterBits); // invalid index, used as sentinel } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { // sort so the highest LFUs come first in the segment. // assume lfu_state[idx] <= 2^40 - 1 and cache_set < 2^24 -1 cache_sets[n] = ((static_cast<uint64_t>(cache_set) << kLFUCounterBits)) | ((static_cast<uint64_t>(1) << kLFUCounterBits) - 1 - lfu_state[idx]); } } } std::pair<Tensor, Tensor> lfu_cache_find_uncached_cuda( Tensor unique_indices, Tensor unique_indices_length, int64_t max_indices, Tensor lxu_cache_state, Tensor lfu_state) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(unique_indices.get_device()); auto cache_sets = empty_like(unique_indices, unique_indices.options().dtype(kLong)); int32_t N = unique_indices.numel(); auto sorted_cache_sets = empty_like(cache_sets); auto cache_set_sorted_unique_indices = empty_like(unique_indices); // Find uncached indices lfu_cache_find_uncached_kernel<<< div_round_up(N, kMaxThreads / kWarpSize), dim3(kWarpSize, kMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( unique_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), max_indices, lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), (uint64_t*)cache_sets.data_ptr<int64_t>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Sort the cache sets and ids size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, (uint64_t*)cache_sets.data_ptr<int64_t>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, unique_indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, (uint64_t*)cache_sets.data_ptr<int64_t>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), unique_indices.data_ptr<int64_t>(), cache_set_sorted_unique_indices.data_ptr<int64_t>(), N, 0, int(log2(float(lxu_cache_state.size(0) + 1)) + 1) + kLFUCounterBits, at::cuda::getCurrentCUDAStream(), false)); return {sorted_cache_sets, cache_set_sorted_unique_indices}; } template <typename emb_t, typename cache_t> __global__ __launch_bounds__(kCacheMaxThreads) void lfu_cache_insert_kernel( PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> weights, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_hash_size_cumsum, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> cache_index_table_map, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const 
PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const uint64_t* __restrict__ sorted_cache_sets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> cache_set_sorted_indices, const int32_t* __restrict__ N_unique, PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor64<int64_t, 1, RestrictPtrTraits> lfu_state, bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args) { int32_t C = lxu_cache_state.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= *N_unique) { return; } // check if this warp is responsible for this whole segment. bool segment_start = (n == 0 || (sorted_cache_sets[n - 1] >> kLFUCounterBits) != (sorted_cache_sets[n] >> kLFUCounterBits)); if (!segment_start) { // don't have *warp* divergence since we launch full warps in blockDim.x, // so we can just exit this warp entirely. return; } uint32_t cache_set = (sorted_cache_sets[n] >> kLFUCounterBits); if (cache_set == C) { // ignore the already-existing elements return; } int32_t SL = 1; while (n + SL < *N_unique && (sorted_cache_sets[n + SL] >> kLFUCounterBits) == cache_set) { SL += 1; } // now, we need to insert the (unique!) values in indices[n:n + SL] into // our slots. int32_t slot = threadIdx.x; int64_t current_idx = lxu_cache_state[cache_set][slot]; int64_t current_lfu_cost = (current_idx != static_cast<int64_t>(kCacheStateInvalid)) ? lfu_state[current_idx] : -1; int64_t costs[1] = {current_lfu_cost}; int32_t slots[1] = {slot}; BitonicSort<int64_t, int32_t, 1, Comparator<int64_t>>::sort(costs, slots); int32_t sorted_slot = slots[0]; int64_t sorted_lfu_cost = costs[0]; for (int32_t l = 0; l < min(SL, kWarpSize); ++l) { int32_t insert_slot = __shfl_sync(0xFFFFFFFF, sorted_slot, l); int64_t insert_current_lfu_cost = __shfl_sync(0xFFFFFFFF, sorted_lfu_cost, l); int64_t insert_idx = cache_set_sorted_indices[n + l]; int64_t insert_lfu_cost = lfu_state[insert_idx]; if (insert_current_lfu_cost > insert_lfu_cost) { // don't insert. // all subsequent `current_lfu_cost` values are greater, and all // subsequent `insert_lfu_cost` values are smaller, so we can exit // early here. return; } int32_t t_insert = cache_index_table_map[insert_idx]; int64_t idx_insert = insert_idx - cache_hash_size_cumsum[t_insert]; int64_t weights_offset_insert = weights_offsets[t_insert]; int32_t D_start_insert = D_offsets[t_insert]; int32_t D_end_insert = D_offsets[t_insert + 1]; int32_t D_insert = D_end_insert - D_start_insert; // not empty if (insert_current_lfu_cost != -1) { // ensure that threadIdx.x is the only thread reading/writing to // lxu_cache_state int64_t current_idx = threadIdx.x == 0 ? 
lxu_cache_state[cache_set][insert_slot] : 0; current_idx = __shfl_sync(0xFFFFFFFF, current_idx, 0); int32_t t_current = cache_index_table_map[current_idx]; int64_t idx_current = current_idx - cache_hash_size_cumsum[t_current]; int64_t weights_offset_current = weights_offsets[t_current]; int32_t D_start_current = D_offsets[t_current]; int32_t D_end_current = D_offsets[t_current + 1]; int32_t D_current = D_end_current - D_start_current; int32_t D_emb = D_current; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_current + idx_current * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_current, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), (blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x) * kWarpSize + l, &state); weight_row.set_stoc_state(&state); } float2 qparams; acc_type<cache_t, true> local_min = std::numeric_limits<acc_type<cache_t, true>>::max(); acc_type<cache_t, true> local_max = std::numeric_limits<acc_type<cache_t, true>>::lowest(); if (std::is_same<emb_t, uint8_t>::value) { for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); // qparams not used local_max = max(local_max, vec4_max(cache_weights_vec)); local_min = min(local_min, vec4_min(cache_weights_vec)); } qparams = warp_find_qparams(local_min, local_max); if (threadIdx.x == 0) { weight_row.store_qparams(qparams); } } for (int32_t d = threadIdx.x; d * 4 < D_current; d += blockDim.x) { Vec4T<cache_t> cache_weights_vec = weight_row.load(d * 4, qparams); weight_row.evict(cache_weights_vec, d * 4, qparams); } } // insert into cache int32_t D_emb = D_insert; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } auto weight_row_cache = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], &lxu_cache_weights[cache_set * kWarpSize + insert_slot][0], D_insert, nullptr); auto weight_row_emb = WeightRow<emb_t, cache_t, cache_t>( &weights[weights_offset_insert + idx_insert * D_emb + 0], nullptr, D_insert, nullptr); float2 qparams; if (std::is_same<emb_t, uint8_t>::value) { qparams = weight_row_emb.load_qparams(); } for (int32_t d = threadIdx.x; d * 4 < D_insert; d += blockDim.x) { auto row = weight_row_emb.load(d * 4, qparams); weight_row_cache.store(row, d * 4, qparams); } if (threadIdx.x == 0) { lxu_cache_state[cache_set][insert_slot] = insert_idx; } } } void lfu_cache_insert_cuda( Tensor weights, Tensor cache_hash_size_cumsum, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor sorted_cache_sets, Tensor cache_set_sorted_unique_indices, Tensor unique_indices_length, Tensor lxu_cache_state, Tensor lxu_cache_weights, Tensor lfu_state, bool stochastic_rounding) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(weights.get_device()); int32_t N = cache_set_sorted_unique_indices.numel(); DISPATCH_EMB_CACHE_TYPES( weights.type(), lxu_cache_weights.type(), "lfu_cache_insert_kernel_2", ([&] { PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = 
at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } lfu_cache_insert_kernel<emb_t, cache_t> <<<div_round_up(N, kCacheMaxThreads / kWarpSize), dim3(kWarpSize, kCacheMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), cache_hash_size_cumsum .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), cache_index_table_map .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), weights_offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), (uint64_t*)sorted_cache_sets.data_ptr<int64_t>(), cache_set_sorted_unique_indices .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), unique_indices_length.data_ptr<int32_t>(), lxu_cache_state .packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_weights .packed_accessor64<cache_t, 2, RestrictPtrTraits>(), lfu_state.packed_accessor64<int64_t, 1, RestrictPtrTraits>(), stochastic_rounding, rng_engine_inputs); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } void lfu_cache_populate_cuda( Tensor weights, Tensor cache_hash_size_cumsum, int64_t total_cache_hash_size, Tensor cache_index_table_map, Tensor weights_offsets, Tensor D_offsets, Tensor linear_cache_indices, Tensor lxu_cache_state, Tensor lxu_cache_weights, Tensor lfu_state, bool stochastic_rounding) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(weights.get_device()); TORCH_CHECK( linear_cache_indices.numel() < std::numeric_limits<int32_t>::max()); if (linear_cache_indices.numel() == 0) { // nothing to do return; } // get unqiue indices Tensor unique_indices; Tensor unique_indices_length; c10::optional<Tensor> unique_indices_count; std::tie(unique_indices, unique_indices_length, unique_indices_count) = get_unique_indices_cuda( linear_cache_indices, total_cache_hash_size, true); // update lfu counts lfu_update_counts_cuda( unique_indices, unique_indices_length, *unique_indices_count, lfu_state); // find uncached indices auto cache_sets_and_unique_indices = lfu_cache_find_uncached_cuda( unique_indices, unique_indices_length, total_cache_hash_size, lxu_cache_state, lfu_state); auto sorted_cache_sets = cache_sets_and_unique_indices.first; auto cache_set_sorted_unique_indices = cache_sets_and_unique_indices.second; // insert caching weights lfu_cache_insert_cuda( weights, cache_hash_size_cumsum, cache_index_table_map, weights_offsets, D_offsets, sorted_cache_sets, cache_set_sorted_unique_indices, unique_indices_length, lxu_cache_state, lxu_cache_weights, lfu_state, stochastic_rounding); } __global__ __launch_bounds__(kMaxThreads) void lxu_cache_lookup_kernel( const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> linear_cache_indices, const PackedTensorAccessor32<int64_t, 2, RestrictPtrTraits> lxu_cache_state, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> lxu_cache_locations) { const int32_t C = lxu_cache_state.size(0); const int32_t N = linear_cache_indices.size(0); int32_t n = blockIdx.x * blockDim.y + threadIdx.y; if (n >= N) { return; } int64_t idx = linear_cache_indices[n]; int32_t cache_set = cache_slot(idx, C); auto slot = threadIdx.x; bool found = (__ldg((&lxu_cache_state[cache_set][0]) + slot) == idx); if (found) { lxu_cache_locations[n] = cache_set * kWarpSize + slot; } if (!__any_sync(0xFFFFFFFF, found)) { if (threadIdx.x == 0) { lxu_cache_locations[n] = kCacheLocationMissing; } } } Tensor 
lxu_cache_lookup_cuda( Tensor linear_cache_indices, Tensor lxu_cache_state) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(linear_cache_indices.get_device()); const auto N = linear_cache_indices.numel(); auto lxu_cache_locations = empty_like( linear_cache_indices, linear_cache_indices.options().dtype(kInt)); if (linear_cache_indices.numel() == 0) { // nothing to do return lxu_cache_locations; } const dim3 threads(kWarpSize, kMaxThreads / kWarpSize); const dim3 blocks(div_round_up(N, kMaxThreads / kWarpSize)); lxu_cache_lookup_kernel<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( linear_cache_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), lxu_cache_state.packed_accessor32<int64_t, 2, RestrictPtrTraits>(), lxu_cache_locations.packed_accessor32<int32_t, 1, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return lxu_cache_locations; }
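The sort-key trick used by lfu_cache_find_uncached_kernel above is easy to miss in the flattened source: each miss is packed into one 64-bit word whose high bits are the cache set and whose low bits are an inverted LFU count, so a single ascending radix sort groups misses by set and, within each set, puts the most frequently used candidates first. A minimal host-side sketch of that packing and of the end-bit count passed to cub::DeviceRadixSort::SortPairs, assuming kLFUCounterBits is 40 (the kernel comment only implies this via "lfu_state[idx] <= 2^40 - 1 and cache_set < 2^24 - 1"; the real constant is defined elsewhere in the source):

#include <cstdint>
#include <cmath>

// Assumed value; mirrors the in-kernel comment, not a definition from this excerpt.
constexpr int kLFUCounterBits = 40;

// Pack one miss: high bits = cache set, low bits = inverted LFU count, so that
// ascending radix sort yields sets in order and highest-LFU entries first within a set.
inline uint64_t make_lfu_sort_key(uint32_t cache_set, uint64_t lfu_count) {
  return (static_cast<uint64_t>(cache_set) << kLFUCounterBits) |
         (((uint64_t(1) << kLFUCounterBits) - 1) - lfu_count);
}

// End bit for cub::DeviceRadixSort::SortPairs: enough bits for C + 1 distinct set values
// (C itself is the "already cached / invalid" sentinel) plus the counter bits, matching
// the expression used in lfu_cache_find_uncached_cuda.
inline int lfu_sort_end_bit(int64_t C) {
  return static_cast<int>(std::log2(static_cast<float>(C + 1)) + 1) + kLFUCounterBits;
}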
d65839520007d2f627d8240c0dddf669b9b2d77d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void mult(int* A,int* B,int* C) { int x = threadIdx.x; int y = threadIdx.y; if ( x >= N || y >= M ) return; for(int i=0,j=0; i < N && j < M ; i++, j++) { C[x*N+y] += A[x*N+j]*B[i*N+y]; } }
d65839520007d2f627d8240c0dddf669b9b2d77d.cu
#include "includes.h" __global__ void mult(int* A,int* B,int* C) { int x = threadIdx.x; int y = threadIdx.y; if ( x >= N || y >= M ) return; for(int i=0,j=0; i < N && j < M ; i++, j++) { C[x*N+y] += A[x*N+j]*B[i*N+y]; } }
816fd7db9cae9331a01a825ce2134ce60b4dd39c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater_hip.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ hipError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_zero_velocities_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, N); // this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not // hipSuccess return hipSuccess; }
816fd7db9cae9331a01a825ce2134ce60b4dd39c.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ cudaError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_zero_velocities_kernel<<< grid, threads >>>(d_vel, N); // this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not // cudaSuccess return cudaSuccess; }
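gpu_zero_velocities above is a plain one-thread-per-particle driver: it launches ceil(N / 256) blocks of 256 threads and zeroes the .x/.y/.z components while leaving the mass in .w untouched. A minimal, hypothetical host-side call site, assuming Scalar4 is HOOMD's usual float4-style struct and that the caller owns the allocation (in HOOMD proper, ParticleData manages d_vel):

// Hypothetical standalone driver; names and sizes are illustrative only.
void zero_all_velocities_example(unsigned int N) {
    Scalar4* d_vel = nullptr;
    cudaMalloc((void**)&d_vel, N * sizeof(Scalar4));   // velocity in .x/.y/.z, mass in .w
    // ... copy or compute initial velocities into d_vel ...
    cudaError_t err = gpu_zero_velocities(d_vel, N);   // ceil(N/256) blocks of 256 threads
    (void)err;  // always cudaSuccess in this driver, but a real caller would still check
    cudaFree(d_vel);
}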
0d4de25c83b921463fff3a67f60ebcb789a7b70a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <assert.h> #include <algorithm> #include <omp.h> #include <fstream> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "include/cuZFP.cuh" #include "zfparray3.h" enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs; using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; const int nt = 0; const double pi = 3.14159265358979323846; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = BSIZE*64; uint MAXBITS = BSIZE*64; uint MAXPREC = 64; int MINEXP = -1074; const double rate = BSIZE; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); hipMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); hipMemcpyToSymbol(c_maxbits, &MAXBITS, 
sizeof(uint)); ec.chk("setupConst: c_maxbits"); const uint sizeof_scalar = sizeof(Scalar); hipMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); hipMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); hipMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); hipMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); hipMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; __device__ static inline int idx(int x, int y, int z) { return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y); } template<typename Scalar> __global__ void cudaDiffusion ( const Scalar *u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal, Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx); Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy); Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz); du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz); } template<typename Scalar> __global__ void cudaSum ( Scalar *u, const Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; u[idx(x, y, z)] += du[idx(x, y, z)]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Scalar *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint tbidx = bidx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du); //__syncthreads(); int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx); Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * 
dy); Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, bidx * bsize, du ); } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Word *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du); for (int i = 0; i < 3; i++){ s_u_ext[i * 64 + tid] = 0; } if (tid < 24) s_u_ext[192 + tid] = 0; __syncthreads(); //left s_nghs[tid] = 0; if (blockIdx.x > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //right s_nghs[tid] = 0; if (blockIdx.x+1 < gridDim.x){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //down s_nghs[tid] = 0; if (blockIdx.y > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //up s_nghs[tid] = 0; if (blockIdx.y + 1 < gridDim.y){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * 
blockDim.x * blockDim.y]; } __syncthreads(); //near s_nghs[tid] = 0; if (blockIdx.z > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y]; } __syncthreads(); //far s_nghs[tid] = 0; if (blockIdx.z + 1 < gridDim.z){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x]; } __syncthreads(); s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid]; __syncthreads(); Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx); Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy); Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); //if (uxx < 0 || uyy < 0 || uzz < 0){ // printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y); //} cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, idx * bsize, du ); //out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> void gpuZFPDiffusion ( int nx, int ny, int nz, device_vector<Word > &u, device_vector<Word > &du, device_vector<Scalar> &df_u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal ) { dim3 block_size = dim3(4, 4, 4); dim3 grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > ( thrust::raw_pointer_cast(u.data()), thrust::raw_pointer_cast(du.data()), size, dx,dy,dz,dt,k ); // cuZFP::decode<Int, UInt, Scalar, bsize, intprec>( // nx, ny, nz, // u, df_u, // group_count // ); //cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + 
sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > // ( // thrust::raw_pointer_cast(df_u.data()), // thrust::raw_pointer_cast(du.data()), // size, // dx,dy,dz,dt,k // ); cuZFP::transform <Int, UInt, Scalar, bsize, intprec> ( nx,ny,nz, size, u, du, thrust::plus<Scalar>() ); //Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u); //Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du); //cout << "post-transform du: " << sum_du << " u: " << sum_u << endl; } template<class Int, class UInt, class Scalar, uint bsize> void gpuEncode(host_vector<Scalar> &h_u) { device_vector<Scalar> d_u; d_u = h_u; ErrorCheck ec; hipEvent_t start, stop; float millisecs; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); dim3 emax_size(nx / 4, ny / 4, nz / 4); device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size); hipStreamSynchronize(0); ec.chk("cudaEncode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudadecoe"); cout << "decode GPU in time: " << millisecs / 1000.0 << endl; host_vector<Scalar> h_out = d_u; //array3d out(nx, ny, nz, rate); //for (int i = 0; i < h_out.size(); i++){ // out[i] = h_out[i]; //} } int main() { host_vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../h512_0171_little_endian.raw", ios::binary); if (ifs) { double read; for (int i = 0; i < nx*ny*nz; i++){ ifs.read(reinterpret_cast<char*>(&read), sizeof read); h_vec_in[i] = read; } } ifs.close(); cout << "cpu encode start" << endl; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_vec_in[i]; } double time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; host_vector<double> h_vec_out(nx*ny*nz, 0); cout << "cpu decode start" << endl; start_time = omp_get_wtime(); for (int z = 0; z < nz; z++){ for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z); } } } time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl; cout << "GPU ZFP encode start" << endl; hipDeviceSetCacheConfig(hipFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuDiffusion" << endl; gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuDiffusion" << endl; }
0d4de25c83b921463fff3a67f60ebcb789a7b70a.cu
#include <iostream> #include <iomanip> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> #include <assert.h> #include <algorithm> #include <omp.h> #include <fstream> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "include/cuZFP.cuh" #include "zfparray3.h" enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs; using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; const int nt = 0; const double pi = 3.14159265358979323846; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = BSIZE*64; uint MAXBITS = BSIZE*64; uint MAXPREC = 64; int MINEXP = -1074; const double rate = BSIZE; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); cudaMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); cudaMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); ec.chk("setupConst: c_maxbits"); const uint 
sizeof_scalar = sizeof(Scalar); cudaMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); cudaMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); cudaMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); cudaMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); cudaMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; __device__ static inline int idx(int x, int y, int z) { return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y); } template<typename Scalar> __global__ void cudaDiffusion ( const Scalar *u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal, Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx); Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy); Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz); du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz); } template<typename Scalar> __global__ void cudaSum ( Scalar *u, const Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; u[idx(x, y, z)] += du[idx(x, y, z)]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Scalar *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint tbidx = bidx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du); //__syncthreads(); int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx); Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy); Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 
1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, bidx * bsize, du ); } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Word *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du); for (int i = 0; i < 3; i++){ s_u_ext[i * 64 + tid] = 0; } if (tid < 24) s_u_ext[192 + tid] = 0; __syncthreads(); //left s_nghs[tid] = 0; if (blockIdx.x > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //right s_nghs[tid] = 0; if (blockIdx.x+1 < gridDim.x){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //down s_nghs[tid] = 0; if (blockIdx.y > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //up s_nghs[tid] = 0; if (blockIdx.y + 1 < gridDim.y){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y]; } __syncthreads(); //near 
s_nghs[tid] = 0; if (blockIdx.z > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y]; } __syncthreads(); //far s_nghs[tid] = 0; if (blockIdx.z + 1 < gridDim.z){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x]; } __syncthreads(); s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid]; __syncthreads(); Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx); Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy); Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); //if (uxx < 0 || uyy < 0 || uzz < 0){ // printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y); //} cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, idx * bsize, du ); //out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> void gpuZFPDiffusion ( int nx, int ny, int nz, device_vector<Word > &u, device_vector<Word > &du, device_vector<Scalar> &df_u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal ) { dim3 block_size = dim3(4, 4, 4); dim3 grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > ( thrust::raw_pointer_cast(u.data()), thrust::raw_pointer_cast(du.data()), size, dx,dy,dz,dt,k ); // cuZFP::decode<Int, UInt, Scalar, bsize, intprec>( // nx, ny, nz, // u, df_u, // group_count // ); //cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 
4 + 216 * sizeof(Scalar) >> > // ( // thrust::raw_pointer_cast(df_u.data()), // thrust::raw_pointer_cast(du.data()), // size, // dx,dy,dz,dt,k // ); cuZFP::transform <Int, UInt, Scalar, bsize, intprec> ( nx,ny,nz, size, u, du, thrust::plus<Scalar>() ); //Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u); //Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du); //cout << "post-transform du: " << sum_du << " u: " << sum_u << endl; } template<class Int, class UInt, class Scalar, uint bsize> void gpuEncode(host_vector<Scalar> &h_u) { device_vector<Scalar> d_u; d_u = h_u; ErrorCheck ec; cudaEvent_t start, stop; float millisecs; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dim3 emax_size(nx / 4, ny / 4, nz / 4); device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size); cudaStreamSynchronize(0); ec.chk("cudaEncode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudadecoe"); cout << "decode GPU in time: " << millisecs / 1000.0 << endl; host_vector<Scalar> h_out = d_u; //array3d out(nx, ny, nz, rate); //for (int i = 0; i < h_out.size(); i++){ // out[i] = h_out[i]; //} } int main() { host_vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../h512_0171_little_endian.raw", ios::binary); if (ifs) { double read; for (int i = 0; i < nx*ny*nz; i++){ ifs.read(reinterpret_cast<char*>(&read), sizeof read); h_vec_in[i] = read; } } ifs.close(); cout << "cpu encode start" << endl; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_vec_in[i]; } double time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; host_vector<double> h_vec_out(nx*ny*nz, 0); cout << "cpu decode start" << endl; start_time = omp_get_wtime(); for (int z = 0; z < nz; z++){ for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z); } } } time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl; cout << "GPU ZFP encode start" << endl; cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuDiffusion" << endl; gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuDiffusion" << endl; }
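The rate bookkeeping shared by the two diffusion translation units above is worth spelling out: each 4x4x4 zfp block holds 64 doubles and is compressed into BSIZE = 16 words of 64 bits, so MAXBITS = 16 * 64 = 1024 bits per block, i.e. 1024 / 64 = 16 bits per value, which is exactly why the code sets rate = BSIZE and amounts to 4x compression relative to 64-bit doubles. A small compile-time check of that arithmetic (illustrative only, mirroring the constants in the file):

constexpr unsigned kBlockValues = 4 * 4 * 4;           // values per zfp block
constexpr unsigned kBSIZE       = 16;                   // words per compressed block (#define BSIZE 16)
constexpr unsigned kWordBits    = 64;                   // Word matches unsigned long long
constexpr unsigned kMaxBits     = kBSIZE * kWordBits;   // 1024 bits per block
static_assert(kMaxBits / kBlockValues == kBSIZE, "rate in bits/value equals BSIZE");
static_assert(64 / (kMaxBits / kBlockValues) == 4, "4x compression vs 64-bit doubles");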
e4cfc7c2449279a833248e7c2485bc8243d94f8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ #include "common_magma.h" #define magmablas_dgemv_tesla magmablas_dgemv #define magmablas_dgemvt_tesla magmablas_dgemvt #define num_threads 64 #define dgemv_bs 64 __global__ void dgemv_kernel( int n, int m, int n1, const double *A, int lda, const double *x, double *y ) { int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; x += threadIdx.x; double res = 0.f; __shared__ double buff[dgemv_bs]; for(int i=0; i<n1; i += dgemv_bs ){ __syncthreads(); buff[threadIdx.x] = x[i]; __syncthreads(); #pragma unroll for(int j=0; j < dgemv_bs ; j++){ res+=A[0]*buff[j]; A+=lda; } } __syncthreads(); if (m>n1){ buff[threadIdx.x] = x[n1]; __syncthreads(); for(int j=0; j<(m-n1); j++){ res += A[0]*buff[j]; A+=lda; } } if (ind<n) y[ind] = res; } extern "C" void magmablas_dgemvt_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ); extern "C" void magmablas_dgemv_tesla(char trans, magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *z, magma_int_t incz) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes: 1) z = A x if trans == 'N' or 'n', alpha == 1, beta == 0, and incx == incz == 1 (using magmablas code) 2) z = alpha A^t x if trans == 'T' or 't', beta == 0, and incx == incz == 1 (using magmablas code) 3) z = alpha A^trans x + beta z otherwise, using CUBLAS. Arguments ========== TRANS - CHARACTER*1 On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' z := alpha*A *x + beta*z TRANS = 'T' or 't' z := alpha*A'*x + beta*z M - (input) INTEGER On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A ALPHA - DOUBLE PRECISION On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension n if trans == 'n' m if trans == 't' INCX - (input) Specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit Z - (output) DOUBLE PRECISION array of dimension m if trans == 'n' n if trans == 't' INCZ - (input) Specifies the increment for the elements of Z. INCZ must not be zero. Unchanged on exit. ===================================================================== */ if (incx == 1 && incz == 1 && beta == 0.) if (trans == 'n' || trans == 'N') if (alpha == 1.) 
{ magma_int_t blocks; if (m % num_threads==0) blocks = m/num_threads; else blocks = m/num_threads + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threads, 1, 1); hipLaunchKernelGGL(( dgemv_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, (n/dgemv_bs)*dgemv_bs, A, lda, x, z); } else hipblasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); else magmablas_dgemvt_tesla(m, n, alpha, A, lda, x, z); else hipblasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); } __global__ void dgemvt_kernel1( int n, int m, double alpha, int n1, const double *A, int lda, const double *x, double *y ) { const int inx = threadIdx.x; const int iny = threadIdx.y; int ind = iny + __mul24(blockIdx.x,32); ind = inx + __mul24(ind,lda); int ind2 = inx + __mul24(iny,32); A += ind; x += ind2; double res = 0.f; __shared__ double buff[dgemv_bs]; __shared__ double la[32][33]; for(int i=0; i<n1; i += dgemv_bs ){ buff[ind2] = x[i]; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; A += 32; //=============================================== #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+32+iny*16]; A += 32; } if (n>n1){ if (ind2>=(n-n1)) buff[ind2]=0.; else buff[ind2] = x[n1]; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); if (n-n1>16){ #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; A += 32; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+32+iny*16]; } else { #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; } } ind = inx + __mul24(blockIdx.x,32); la[inx][iny]= res; if (ind<m){ res = la[inx][0] + la[inx][1]; y[ind] = alpha*res; } } __global__ void dgemvt_kernel2( int n, int m, double alpha, int n1, const double *A, int lda, const double *x, double *y ) { const int inx = threadIdx.x; const int iny = threadIdx.y; int ind = iny + __mul24(blockIdx.x,16); ind = inx + __mul24(ind,lda); int ind2 = inx + __mul24(iny,16); if (ind2>31) ind2-=32; A += ind; x += ind2; if (ind2>31) ind2-=32; double res = 0.f; __shared__ double buff[32]; __shared__ double la[16][17]; for(int i=0; i<n1; i += 32 ){ buff[ind2] = x[i]; #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+iny*4]; A += 16; __syncthreads(); //=========================================== #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+16+iny*4]; A += 16; } if (n>n1){ if (ind2>=(n-n1)) buff[ind2]=0.; else buff[ind2] = x[n1]; __syncthreads(); #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); if (n-n1>4){ #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+iny*4]; A += 16; __syncthreads(); #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+16+iny*4]; } else { #pragma unroll for(int j=0; j < 4; j++) res += 
la[inx][iny*4+j]*buff[j+iny*4]; } } __syncthreads(); ind = inx + __mul24(blockIdx.x,16); la[inx][iny]= res; __syncthreads(); if (ind<m){ res = la[inx][0] + la[inx][1] + la[inx][2] + la[inx][3]; y[ind] = alpha*res; } } extern "C" void magmablas_dgemvt1_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. Recommended for large M and N. M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Z - (output) DOUBLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ magma_int_t blocks; if (n % 32==0) blocks = n/32; else blocks = n/32 + 1; dim3 grid(blocks, 1, 1); dim3 threads(32, 2, 1); hipLaunchKernelGGL(( dgemvt_kernel1), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, (m / dgemv_bs)*dgemv_bs, A, lda, x, z); } extern "C" void magmablas_dgemvt2_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. Used in least squares solver for N small (e.g. = BS, a block size of order 64, 128, etc). M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Z - (output) DOUBLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ magma_int_t blocks; if (n % 16==0) blocks = n/16; else blocks = n/16 + 1; dim3 grid(blocks, 1, 1); dim3 threads(16, 4, 1); hipLaunchKernelGGL(( dgemvt_kernel2), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, (m / 32)*32, A, lda, x, z); } extern "C" void magmablas_dgemvt_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) SINGLE PRECISION array of dimension (LDA, n) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) SINGLE PRECISION array of dimension m. Z - (output) SINGLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ if (n<=128) magmablas_dgemvt2_tesla(m, n, alpha, A, lda, x, z); else magmablas_dgemvt1_tesla(m, n, alpha, A, lda, x, z); } #undef num_threads #undef dgemv_bs
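Per the Purpose notes in the file above, the hand-written kernels are only used on two fast paths (non-transposed with alpha == 1, beta == 0, unit strides, or the transposed alpha-only case); everything else falls through to the vendor gemv. A hypothetical call that hits case 1 of that dispatch, sketched with made-up sizes and device pointers:

// Hypothetical fast-path call: trans == 'N', alpha == 1, beta == 0, incx == incz == 1.
magma_int_t m = 1024, n = 512, ldda = m;   // example sizes, column-major A with leading dim ldda
double *dA, *dx, *dz;                      // device arrays of ldda*n, n, and m doubles; allocation elided
magmablas_dgemv_tesla('N', m, n, 1.0, dA, ldda, dx, 1, 0.0, dz, 1);
// With these arguments the wrapper launches dgemv_kernel on ceil(m/64) blocks of 64 threads
// instead of falling back to the vendor BLAS gemv.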
e4cfc7c2449279a833248e7c2485bc8243d94f8e.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ #include "common_magma.h" #define magmablas_dgemv_tesla magmablas_dgemv #define magmablas_dgemvt_tesla magmablas_dgemvt #define num_threads 64 #define dgemv_bs 64 __global__ void dgemv_kernel( int n, int m, int n1, const double *A, int lda, const double *x, double *y ) { int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; x += threadIdx.x; double res = 0.f; __shared__ double buff[dgemv_bs]; for(int i=0; i<n1; i += dgemv_bs ){ __syncthreads(); buff[threadIdx.x] = x[i]; __syncthreads(); #pragma unroll for(int j=0; j < dgemv_bs ; j++){ res+=A[0]*buff[j]; A+=lda; } } __syncthreads(); if (m>n1){ buff[threadIdx.x] = x[n1]; __syncthreads(); for(int j=0; j<(m-n1); j++){ res += A[0]*buff[j]; A+=lda; } } if (ind<n) y[ind] = res; } extern "C" void magmablas_dgemvt_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ); extern "C" void magmablas_dgemv_tesla(char trans, magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *z, magma_int_t incz) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes: 1) z = A x if trans == 'N' or 'n', alpha == 1, beta == 0, and incx == incz == 1 (using magmablas code) 2) z = alpha A^t x if trans == 'T' or 't', beta == 0, and incx == incz == 1 (using magmablas code) 3) z = alpha A^trans x + beta z otherwise, using CUBLAS. Arguments ========== TRANS - CHARACTER*1 On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' z := alpha*A *x + beta*z TRANS = 'T' or 't' z := alpha*A'*x + beta*z M - (input) INTEGER On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A ALPHA - DOUBLE PRECISION On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension n if trans == 'n' m if trans == 't' INCX - (input) Specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit Z - (output) DOUBLE PRECISION array of dimension m if trans == 'n' n if trans == 't' INCZ - (input) Specifies the increment for the elements of Z. INCZ must not be zero. Unchanged on exit. ===================================================================== */ if (incx == 1 && incz == 1 && beta == 0.) if (trans == 'n' || trans == 'N') if (alpha == 1.) 
{ magma_int_t blocks; if (m % num_threads==0) blocks = m/num_threads; else blocks = m/num_threads + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threads, 1, 1); dgemv_kernel<<< grid, threads, 0, magma_stream >>> (m, n, (n/dgemv_bs)*dgemv_bs, A, lda, x, z); } else cublasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); else magmablas_dgemvt_tesla(m, n, alpha, A, lda, x, z); else cublasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); } __global__ void dgemvt_kernel1( int n, int m, double alpha, int n1, const double *A, int lda, const double *x, double *y ) { const int inx = threadIdx.x; const int iny = threadIdx.y; int ind = iny + __mul24(blockIdx.x,32); ind = inx + __mul24(ind,lda); int ind2 = inx + __mul24(iny,32); A += ind; x += ind2; double res = 0.f; __shared__ double buff[dgemv_bs]; __shared__ double la[32][33]; for(int i=0; i<n1; i += dgemv_bs ){ buff[ind2] = x[i]; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; A += 32; //=============================================== #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+32+iny*16]; A += 32; } if (n>n1){ if (ind2>=(n-n1)) buff[ind2]=0.; else buff[ind2] = x[n1]; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); if (n-n1>16){ #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; A += 32; #pragma unroll for(int j=0; j<16; j++) la[iny+__mul24(j,2)][inx] = A[j*__mul24(2,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+32+iny*16]; } else { #pragma unroll for(int j=0; j < 16; j++) res += la[inx][iny*16+j]*buff[j+iny*16]; } } ind = inx + __mul24(blockIdx.x,32); la[inx][iny]= res; if (ind<m){ res = la[inx][0] + la[inx][1]; y[ind] = alpha*res; } } __global__ void dgemvt_kernel2( int n, int m, double alpha, int n1, const double *A, int lda, const double *x, double *y ) { const int inx = threadIdx.x; const int iny = threadIdx.y; int ind = iny + __mul24(blockIdx.x,16); ind = inx + __mul24(ind,lda); int ind2 = inx + __mul24(iny,16); if (ind2>31) ind2-=32; A += ind; x += ind2; if (ind2>31) ind2-=32; double res = 0.f; __shared__ double buff[32]; __shared__ double la[16][17]; for(int i=0; i<n1; i += 32 ){ buff[ind2] = x[i]; #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+iny*4]; A += 16; __syncthreads(); //=========================================== #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+16+iny*4]; A += 16; } if (n>n1){ if (ind2>=(n-n1)) buff[ind2]=0.; else buff[ind2] = x[n1]; __syncthreads(); #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); if (n-n1>4){ #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+iny*4]; A += 16; __syncthreads(); #pragma unroll for(int j=0; j<4; j++) la[iny+__mul24(j,4)][inx] = A[j*__mul24(4,lda)]; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+16+iny*4]; } else { #pragma unroll for(int j=0; j < 4; j++) res += la[inx][iny*4+j]*buff[j+iny*4]; } } 
__syncthreads(); ind = inx + __mul24(blockIdx.x,16); la[inx][iny]= res; __syncthreads(); if (ind<m){ res = la[inx][0] + la[inx][1] + la[inx][2] + la[inx][3]; y[ind] = alpha*res; } } extern "C" void magmablas_dgemvt1_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. Recommended for large M and N. M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Z - (output) DOUBLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ magma_int_t blocks; if (n % 32==0) blocks = n/32; else blocks = n/32 + 1; dim3 grid(blocks, 1, 1); dim3 threads(32, 2, 1); dgemvt_kernel1<<< grid, threads, 0, magma_stream >>> (m, n, alpha, (m / dgemv_bs)*dgemv_bs, A, lda, x, z); } extern "C" void magmablas_dgemvt2_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. Used in least squares solver for N small (e.g. = BS, a block size of order 64, 128, etc). M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Z - (output) DOUBLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ magma_int_t blocks; if (n % 16==0) blocks = n/16; else blocks = n/16 + 1; dim3 grid(blocks, 1, 1); dim3 threads(16, 4, 1); dgemvt_kernel2<<< grid, threads, 0, magma_stream >>> (m, n, alpha, (m / 32)*32, A, lda, x, z); } extern "C" void magmablas_dgemvt_tesla( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, double *z ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes z = alpha A^t x on the GPU. M - (input) INTEGER. On entry, M specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A A - (input) SINGLE PRECISION array of dimension (LDA, n) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) SINGLE PRECISION array of dimension m. Z - (output) SINGLE PRECISION array of dimension n. On exit Z = alpha A^t X. ===================================================================== */ if (n<=128) magmablas_dgemvt2_tesla(m, n, alpha, A, lda, x, z); else magmablas_dgemvt1_tesla(m, n, alpha, A, lda, x, z); } #undef num_threads #undef dgemv_bs
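// --- Illustrative usage sketch (not part of the MAGMA file above) ---------------
// A minimal host-side call of the 'N' fast path of magmablas_dgemv, assuming
// MAGMA has been initialized and magma_int_t is a plain integer type as in
// standard MAGMA builds. The helper name and sizes are hypothetical.
void demo_dgemv(const double *hA, const double *hx, double *hz, int m, int n)
{
    double *dA, *dx, *dz;
    cudaMalloc(&dA, sizeof(double) * m * n);            // column-major, lda == m
    cudaMalloc(&dx, sizeof(double) * n);
    cudaMalloc(&dz, sizeof(double) * m);
    cudaMemcpy(dA, hA, sizeof(double) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dx, hx, sizeof(double) * n, cudaMemcpyHostToDevice);
    // trans == 'N', alpha == 1, beta == 0, incx == incz == 1 select the custom kernel path
    magmablas_dgemv('N', m, n, 1.0, dA, m, dx, 1, 0.0, dz, 1);
    cudaMemcpy(hz, dz, sizeof(double) * m, cudaMemcpyDeviceToHost);
    cudaFree(dA); cudaFree(dx); cudaFree(dz);
}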
09a2d0ad62fef6c16189094d93122197708f63cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47) # define CUDA_DISABLER #endif #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/transform.h> #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_CUDAARITHM namespace cv { namespace cuda { namespace device { namespace ght { __device__ int g_counter; template <typename T, int PIXELS_PER_THREAD> __global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList) { __shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD]; __shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD]; __shared__ int s_sizes[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (threadIdx.x == 0) s_sizes[threadIdx.y] = 0; __syncthreads(); if (y < edges.rows) { // fill the queue const uchar* edgesRow = edges.ptr(y); const T* dxRow = dx.ptr(y); const T* dyRow = dy.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x) { const T dxVal = dxRow[xx]; const T dyVal = dyRow[xx]; if (edgesRow[xx] && (dxVal != 0 || dyVal != 0)) { const unsigned int coord = (y << 16) | xx; float theta = ::atan2f(dyVal, dxVal); if (theta < 0) theta += 2.0f * CV_PI_F; const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1); s_coordLists[threadIdx.y][qidx] = coord; s_thetaLists[threadIdx.y][qidx] = theta; } } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_sizes[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_sizes[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) { coordList[gidx] = s_coordLists[threadIdx.y][i]; thetaList[gidx] = s_thetaLists[threadIdx.y][i]; } } template <typename T> int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList) { const int PIXELS_PER_THREAD = 8; void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, hipFuncCachePreferShared) ); hipLaunchKernelGGL(( buildEdgePointList<T, PIXELS_PER_THREAD>), dim3(grid), dim3(block), 0, 0, edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<int>(PtrStepSzb 
edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); __global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount, PtrStep<short2> r_table, int* r_sizes, int maxSize, const short2 templCenter, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const int ind = ::atomicAdd(r_sizes + n, 1); if (ind < maxSize) r_table(n, ind) = saturate_cast<short2>(p - templCenter); } void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, int* r_sizes, short2 templCenter, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float thetaScale = levels / (2.0f * CV_PI_F); hipLaunchKernelGGL(( buildRTable), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Ballard_Pos __global__ void Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const int pointsCount, const PtrStep<short2> r_table, const int* r_sizes, PtrStepSzi hist, const float idp, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { short2 c = saturate_cast<short2>(p - r_row[j]); c.x = __float2int_rn(c.x * idp); c.y = __float2int_rn(c.y * idp); if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2) ::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1); } } void Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepSzi hist, float dp, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float idp = 1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); hipLaunchKernelGGL(( Ballard_Pos_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } __global__ void Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f); votes[ind] = make_int3(curVotes, 0, 0); } } } int 
Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(Ballard_Pos_findPosInHist, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( Ballard_Pos_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize, dp, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // Guil_Full struct FeatureTable { uchar* p1_pos_data; size_t p1_pos_step; uchar* p1_theta_data; size_t p1_theta_step; uchar* p2_pos_data; size_t p2_pos_step; uchar* d12_data; size_t d12_step; uchar* r1_data; size_t r1_step; uchar* r2_data; size_t r2_step; }; __constant__ FeatureTable c_templFeatures; __constant__ FeatureTable c_imageFeatures; void Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( hipMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) ); } void Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( hipMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) ); } struct TemplFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step); } static __device__ float* d12(int n) { return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step); } static __device__ float2* r1(int n) { return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step); } }; struct ImageFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step); } static __device__ float* d12(int n) { return 
(float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step); } static __device__ float2* r1(int n) { return (float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step); } }; __device__ float clampAngle(float a) { float res = a; while (res > 2.0f * CV_PI_F) res -= 2.0f * CV_PI_F; while (res < 0.0f) res += 2.0f * CV_PI_F; return res; } __device__ bool angleEq(float a, float b, float eps) { return (::fabs(clampAngle(a - b)) <= eps); } template <class FT, bool isTempl> __global__ void Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount, int* sizes, const int maxSize, const float xi, const float angleEpsilon, const float alphaScale, const float2 center, const float maxDist) { const float p1_theta = thetaList[blockIdx.x]; const unsigned int coord1 = coordList[blockIdx.x]; float2 p1_pos; p1_pos.x = (coord1 & 0xFFFF); p1_pos.y = (coord1 >> 16) & 0xFFFF; for (int i = threadIdx.x; i < pointsCount; i += blockDim.x) { const float p2_theta = thetaList[i]; const unsigned int coord2 = coordList[i]; float2 p2_pos; p2_pos.x = (coord2 & 0xFFFF); p2_pos.y = (coord2 >> 16) & 0xFFFF; if (angleEq(p1_theta - p2_theta, xi, angleEpsilon)) { const float2 d = p1_pos - p2_pos; float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta); float d12 = ::sqrtf(d.x * d.x + d.y * d.y); if (d12 > maxDist) continue; float2 r1 = p1_pos - center; float2 r2 = p2_pos - center; const int n = __float2int_rn(alpha12 * alphaScale); const int ind = ::atomicAdd(sizes + n, 1); if (ind < maxSize) { if (!isTempl) { FT::p1_pos(n)[ind] = p1_pos; FT::p2_pos(n)[ind] = p2_pos; } FT::p1_theta(n)[ind] = p1_theta; FT::d12(n)[ind] = d12; if (isTempl) { FT::r1(n)[ind] = r1; FT::r2(n)[ind] = r2; } } } } } template <class FT, bool isTempl> void Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { const dim3 block(256); const dim3 grid(pointsCount); const float alphaScale = levels / (2.0f * CV_PI_F); hipLaunchKernelGGL(( Guil_Full_buildFeatureList<FT, isTempl>), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, sizes, maxSize, xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale, center, maxDist); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); thrust::device_ptr<int> sizesPtr(sizes); thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize)); } void Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } void Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } __global__ void Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist, const float minAngle, const float maxAngle, const float 
iAngleStep, const int angleRange) { extern __shared__ int s_OHist[]; for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) s_OHist[i] = 0; __syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx]; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float angle = clampAngle(im_p1_theta - t_p1_theta); if (angle >= minAngle && angle <= maxAngle) { const int n = __float2int_rn((angle - minAngle) * iAngleStep); Emulation::smem::atomicAdd(&s_OHist[n], 1); } } } __syncthreads(); for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) ::atomicAdd(OHist + i, s_OHist[i]); } void Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist, float minAngle, float maxAngle, float angleStep, int angleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); minAngle *= (CV_PI_F / 180.0f); maxAngle *= (CV_PI_F / 180.0f); angleStep *= (CV_PI_F / 180.0f); const size_t smemSize = (angleRange + 1) * sizeof(float); hipLaunchKernelGGL(( Guil_Full_calcOHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, OHist, minAngle, maxAngle, 1.0f / angleStep, angleRange); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } __global__ void Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist, const float angle, const float angleEpsilon, const float minScale, const float maxScale, const float iScaleStep, const int scaleRange) { extern __shared__ int s_SHist[]; for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) s_SHist[i] = 0; __syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle; const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float im_d12 = ImageFeatureTable::d12(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { const float scale = im_d12 / t_d12; if (scale >= minScale && scale <= maxScale) { const int s = __float2int_rn((scale - minScale) * iScaleStep); Emulation::smem::atomicAdd(&s_SHist[s], 1); } } } } __syncthreads(); for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) ::atomicAdd(SHist + i, s_SHist[i]); } void Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist, float angle, float angleEpsilon, float minScale, float maxScale, float iScaleStep, int scaleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const size_t smemSize = (scaleRange + 1) * sizeof(float); hipLaunchKernelGGL(( Guil_Full_calcSHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, SHist, angle, angleEpsilon, minScale, maxScale, iScaleStep, scaleRange); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } __global__ void Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale, const float idp) { const int 
tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle; float2 r1 = TemplFeatureTable::r1(level)[tIdx]; float2 r2 = TemplFeatureTable::r2(level)[tIdx]; r1 = r1 * scale; r2 = r2 * scale; r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y); r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y); for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i]; const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { float2 c1, c2; c1 = im_p1_pos - r1; c1 = c1 * idp; c2 = im_p2_pos - r2; c2 = c2 * idp; if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1) continue; if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2) ::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1); } } } } void Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, float angle, float angleEpsilon, float scale, float dp, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const float sinVal = ::sinf(angle); const float cosVal = ::cosf(angle); cudaSafeCall( hipFuncSetCacheConfig(Guil_Full_calcPHist, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( Guil_Full_calcPHist), dim3(grid), dim3(block), 0, 0, templSizes, imageSizes, PHist, angle, sinVal, cosVal, angleEpsilon, scale, 1.0f / dp); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } __global__ void Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float angle, const int angleVotes, const float scale, const int scaleVotes, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, scale, angle); votes[ind] = make_int3(curVotes, scaleVotes, angleVotes); } } } int Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize, float angle, int angleVotes, float scale, int scaleVotes, float dp, int threshold) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemcpy(counterPtr, &curSize, sizeof(int), hipMemcpyHostToDevice) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(Guil_Full_findPosInHist, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( Guil_Full_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize, angle, angleVotes, scale, scaleVotes, dp, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } } }}} #endif // 
HAVE_OPENCV_CUDAARITHM #endif /* CUDA_DISABLER */
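// --- Illustrative note (not part of the OpenCV file above) ----------------------
// buildEdgePointList packs each edge pixel into one 32-bit word, y in the high
// half and x in the low half, and the downstream kernels unpack it the same way.
// A small host-checkable sketch of that encoding (hypothetical helper name):
unsigned int pack_coord(unsigned int x, unsigned int y) { return (y << 16) | x; }
// unpack: x = coord & 0xFFFF;  y = (coord >> 16) & 0xFFFF;
// e.g. pack_coord(300, 200) == 0x00C8012C, which unpacks back to x == 300, y == 200.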
09a2d0ad62fef6c16189094d93122197708f63cf.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47) # define CUDA_DISABLER #endif #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/transform.h> #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_CUDAARITHM namespace cv { namespace cuda { namespace device { namespace ght { __device__ int g_counter; template <typename T, int PIXELS_PER_THREAD> __global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList) { __shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD]; __shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD]; __shared__ int s_sizes[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (threadIdx.x == 0) s_sizes[threadIdx.y] = 0; __syncthreads(); if (y < edges.rows) { // fill the queue const uchar* edgesRow = edges.ptr(y); const T* dxRow = dx.ptr(y); const T* dyRow = dy.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x) { const T dxVal = dxRow[xx]; const T dyVal = dyRow[xx]; if (edgesRow[xx] && (dxVal != 0 || dyVal != 0)) { const unsigned int coord = (y << 16) | xx; float theta = ::atan2f(dyVal, dxVal); if (theta < 0) theta += 2.0f * CV_PI_F; const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1); s_coordLists[threadIdx.y][qidx] = coord; s_thetaLists[threadIdx.y][qidx] = theta; } } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_sizes[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_sizes[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) { coordList[gidx] = s_coordLists[threadIdx.y][i]; thetaList[gidx] = s_thetaLists[threadIdx.y][i]; } } template <typename T> int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList) { const int PIXELS_PER_THREAD = 8; void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, cudaFuncCachePreferShared) ); buildEdgePointList<T, PIXELS_PER_THREAD><<<grid, block>>>(edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); return totalCount; } template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<int>(PtrStepSzb edges, PtrStepSzb dx, 
PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); __global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount, PtrStep<short2> r_table, int* r_sizes, int maxSize, const short2 templCenter, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const int ind = ::atomicAdd(r_sizes + n, 1); if (ind < maxSize) r_table(n, ind) = saturate_cast<short2>(p - templCenter); } void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, int* r_sizes, short2 templCenter, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float thetaScale = levels / (2.0f * CV_PI_F); buildRTable<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Ballard_Pos __global__ void Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const int pointsCount, const PtrStep<short2> r_table, const int* r_sizes, PtrStepSzi hist, const float idp, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { short2 c = saturate_cast<short2>(p - r_row[j]); c.x = __float2int_rn(c.x * idp); c.y = __float2int_rn(c.y * idp); if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2) ::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1); } } void Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepSzi hist, float dp, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float idp = 1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); Ballard_Pos_calcHist<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f); votes[ind] = make_int3(curVotes, 0, 0); } } } int Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold) 
{ void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(Ballard_Pos_findPosInHist, cudaFuncCachePreferL1) ); Ballard_Pos_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // Guil_Full struct FeatureTable { uchar* p1_pos_data; size_t p1_pos_step; uchar* p1_theta_data; size_t p1_theta_step; uchar* p2_pos_data; size_t p2_pos_step; uchar* d12_data; size_t d12_step; uchar* r1_data; size_t r1_step; uchar* r2_data; size_t r2_step; }; __constant__ FeatureTable c_templFeatures; __constant__ FeatureTable c_imageFeatures; void Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( cudaMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) ); } void Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( cudaMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) ); } struct TemplFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step); } static __device__ float* d12(int n) { return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step); } static __device__ float2* r1(int n) { return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step); } }; struct ImageFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step); } static __device__ float* d12(int n) { return (float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step); } static __device__ float2* r1(int n) { return 
(float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step); } }; __device__ float clampAngle(float a) { float res = a; while (res > 2.0f * CV_PI_F) res -= 2.0f * CV_PI_F; while (res < 0.0f) res += 2.0f * CV_PI_F; return res; } __device__ bool angleEq(float a, float b, float eps) { return (::fabs(clampAngle(a - b)) <= eps); } template <class FT, bool isTempl> __global__ void Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount, int* sizes, const int maxSize, const float xi, const float angleEpsilon, const float alphaScale, const float2 center, const float maxDist) { const float p1_theta = thetaList[blockIdx.x]; const unsigned int coord1 = coordList[blockIdx.x]; float2 p1_pos; p1_pos.x = (coord1 & 0xFFFF); p1_pos.y = (coord1 >> 16) & 0xFFFF; for (int i = threadIdx.x; i < pointsCount; i += blockDim.x) { const float p2_theta = thetaList[i]; const unsigned int coord2 = coordList[i]; float2 p2_pos; p2_pos.x = (coord2 & 0xFFFF); p2_pos.y = (coord2 >> 16) & 0xFFFF; if (angleEq(p1_theta - p2_theta, xi, angleEpsilon)) { const float2 d = p1_pos - p2_pos; float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta); float d12 = ::sqrtf(d.x * d.x + d.y * d.y); if (d12 > maxDist) continue; float2 r1 = p1_pos - center; float2 r2 = p2_pos - center; const int n = __float2int_rn(alpha12 * alphaScale); const int ind = ::atomicAdd(sizes + n, 1); if (ind < maxSize) { if (!isTempl) { FT::p1_pos(n)[ind] = p1_pos; FT::p2_pos(n)[ind] = p2_pos; } FT::p1_theta(n)[ind] = p1_theta; FT::d12(n)[ind] = d12; if (isTempl) { FT::r1(n)[ind] = r1; FT::r2(n)[ind] = r2; } } } } } template <class FT, bool isTempl> void Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { const dim3 block(256); const dim3 grid(pointsCount); const float alphaScale = levels / (2.0f * CV_PI_F); Guil_Full_buildFeatureList<FT, isTempl><<<grid, block>>>(coordList, thetaList, pointsCount, sizes, maxSize, xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale, center, maxDist); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<int> sizesPtr(sizes); thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize)); } void Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } void Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } __global__ void Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist, const float minAngle, const float maxAngle, const float iAngleStep, const int angleRange) { extern __shared__ int s_OHist[]; for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) s_OHist[i] = 0; 
__syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx]; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float angle = clampAngle(im_p1_theta - t_p1_theta); if (angle >= minAngle && angle <= maxAngle) { const int n = __float2int_rn((angle - minAngle) * iAngleStep); Emulation::smem::atomicAdd(&s_OHist[n], 1); } } } __syncthreads(); for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) ::atomicAdd(OHist + i, s_OHist[i]); } void Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist, float minAngle, float maxAngle, float angleStep, int angleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); minAngle *= (CV_PI_F / 180.0f); maxAngle *= (CV_PI_F / 180.0f); angleStep *= (CV_PI_F / 180.0f); const size_t smemSize = (angleRange + 1) * sizeof(float); Guil_Full_calcOHist<<<grid, block, smemSize>>>(templSizes, imageSizes, OHist, minAngle, maxAngle, 1.0f / angleStep, angleRange); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist, const float angle, const float angleEpsilon, const float minScale, const float maxScale, const float iScaleStep, const int scaleRange) { extern __shared__ int s_SHist[]; for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) s_SHist[i] = 0; __syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle; const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float im_d12 = ImageFeatureTable::d12(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { const float scale = im_d12 / t_d12; if (scale >= minScale && scale <= maxScale) { const int s = __float2int_rn((scale - minScale) * iScaleStep); Emulation::smem::atomicAdd(&s_SHist[s], 1); } } } } __syncthreads(); for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) ::atomicAdd(SHist + i, s_SHist[i]); } void Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist, float angle, float angleEpsilon, float minScale, float maxScale, float iScaleStep, int scaleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const size_t smemSize = (scaleRange + 1) * sizeof(float); Guil_Full_calcSHist<<<grid, block, smemSize>>>(templSizes, imageSizes, SHist, angle, angleEpsilon, minScale, maxScale, iScaleStep, scaleRange); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale, const float idp) { const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = 
TemplFeatureTable::p1_theta(level)[tIdx] + angle; float2 r1 = TemplFeatureTable::r1(level)[tIdx]; float2 r2 = TemplFeatureTable::r2(level)[tIdx]; r1 = r1 * scale; r2 = r2 * scale; r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y); r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y); for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i]; const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { float2 c1, c2; c1 = im_p1_pos - r1; c1 = c1 * idp; c2 = im_p2_pos - r2; c2 = c2 * idp; if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1) continue; if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2) ::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1); } } } } void Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, float angle, float angleEpsilon, float scale, float dp, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const float sinVal = ::sinf(angle); const float cosVal = ::cosf(angle); cudaSafeCall( cudaFuncSetCacheConfig(Guil_Full_calcPHist, cudaFuncCachePreferL1) ); Guil_Full_calcPHist<<<grid, block>>>(templSizes, imageSizes, PHist, angle, sinVal, cosVal, angleEpsilon, scale, 1.0f / dp); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float angle, const int angleVotes, const float scale, const int scaleVotes, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, scale, angle); votes[ind] = make_int3(curVotes, scaleVotes, angleVotes); } } } int Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize, float angle, int angleVotes, float scale, int scaleVotes, float dp, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemcpy(counterPtr, &curSize, sizeof(int), cudaMemcpyHostToDevice) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(Guil_Full_findPosInHist, cudaFuncCachePreferL1) ); Guil_Full_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize, angle, angleVotes, scale, scaleVotes, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } } }}} #endif // HAVE_OPENCV_CUDAARITHM #endif /* CUDA_DISABLER */
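// --- Illustrative sketch (not part of the OpenCV file above) --------------------
// buildEdgePointList fills a per-block shared-memory queue, has one thread
// reserve a slot range in the global list with atomicAdd, then copies the local
// queue out. A minimal standalone CUDA kernel showing that pattern; the names
// (g_counter_demo, demo_fill) are hypothetical, and plain atomicAdd is used in
// place of Emulation::smem::atomicAdd. Launch with blockDim.x <= 256.
__device__ int g_counter_demo;

__global__ void demo_fill(const int* in, int n, int* out)
{
    __shared__ int s_items[256];
    __shared__ int s_size;
    __shared__ int s_globStart;

    if (threadIdx.x == 0) s_size = 0;
    __syncthreads();

    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] != 0)                       // keep only "matching" elements
    {
        const int qidx = atomicAdd(&s_size, 1);    // append to the block-local queue
        s_items[qidx] = in[i];
    }
    __syncthreads();

    if (threadIdx.x == 0)                          // reserve space in the global list once per block
        s_globStart = atomicAdd(&g_counter_demo, s_size);
    __syncthreads();

    for (int j = threadIdx.x; j < s_size; j += blockDim.x)
        out[s_globStart + j] = s_items[j];         // copy the local queue to the global queue
}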
4f43c51bdfa5d297a40d06a517bacc7366d3fb7b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

extern int N;

// Each block sums its slice of g_num into g_sum[blockIdx.x] with a shared-memory
// tree reduction. The shared buffer is sized at launch time (dynamic shared memory),
// so it must be declared extern here rather than sized with a host-side variable.
__global__ void gpuReduction(int *g_num, int *g_sum, int TotalNum)
{
  extern __shared__ int l_sum[];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int l_i = threadIdx.x;
  l_sum[l_i] = (i < TotalNum) ? g_num[i] : 0;
  __syncthreads();
  for (int p = blockDim.x / 2; p >= 1; p = p >> 1) {
    if (l_i < p)
      l_sum[l_i] += l_sum[l_i + p];
    __syncthreads();
  }
  if (l_i == 0) {
    g_sum[blockIdx.x] = l_sum[0];
  }
}

// Returns the average of the N ints in array: a per-block GPU reduction followed
// by a final sum of the partial results on the host.
double reduction_cuda(int *array, int N)
{
  int sum = 0;
  int i;
  size_t block_dim = 256;
  size_t num_blocks = (N + block_dim - 1) / block_dim;  // round up so a partial tail block is not dropped
  int *d_array;
  int *d_partial_sum;
  int *partial_sum = (int *)malloc(sizeof(int) * num_blocks);
  hipMalloc(&d_array, sizeof(int) * N);
  hipMalloc(&d_partial_sum, sizeof(int) * num_blocks);
  hipMemcpy(d_array, array, sizeof(int) * N, hipMemcpyHostToDevice);
  dim3 grid(num_blocks);
  dim3 block(block_dim);
  hipLaunchKernelGGL(gpuReduction, dim3(grid), dim3(block), sizeof(int) * block_dim, 0,
                     d_array, d_partial_sum, N);
  hipMemcpy(partial_sum, d_partial_sum, sizeof(int) * num_blocks, hipMemcpyDeviceToHost);
  for (i = 0; i < num_blocks; i++) {
    sum += partial_sum[i];
  }
  free(partial_sum);
  hipFree(d_array);  // was free(); device memory must be released with hipFree
  hipFree(d_partial_sum);
  return (double)sum / N;
}
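// --- Illustrative reference (not part of the file above) ------------------------
// What each block of gpuReduction computes, written as plain host code: the sum
// of its block_dim-element slice of the input. Useful as a CPU check against the
// per-block partial sums. The helper name is hypothetical.
static int block_sum_reference(const int *num, int total, int block, int block_dim)
{
    int s = 0;
    for (int j = 0; j < block_dim; ++j) {
        int idx = block * block_dim + j;
        if (idx < total) s += num[idx];
    }
    return s;
}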
4f43c51bdfa5d297a40d06a517bacc7366d3fb7b.cu
#include <stdio.h> #include <stdlib.h> extern int N; __global__ void gpuReduction(int *g_num, int *g_sum, int TotalNum) { extern __shared__ int l_sum[]; /* dynamic shared memory, sized at launch */ int i = blockIdx.x * blockDim.x + threadIdx.x; int l_i = threadIdx.x; l_sum[l_i] = (i < TotalNum) ? g_num[i] : 0; __syncthreads(); /* tree reduction within the block (blockDim.x must be a power of two) */ for (int p = blockDim.x / 2; p >= 1; p = p >> 1) { if (l_i < p) l_sum[l_i] += l_sum[l_i + p]; __syncthreads(); } if (l_i == 0) { g_sum[blockIdx.x] = l_sum[0]; } } double reduction_cuda(int *array, int N) { /* host code: per-block partial sums on the device, final sum on the host */ int sum = 0; int i; size_t block_dim = 256; size_t num_blocks = (N + block_dim - 1) / block_dim; /* round up so a partial final block is covered */ int *d_array; int *d_partial_sum; int *partial_sum = (int *)malloc(sizeof(int) * num_blocks); cudaMalloc(&d_array, sizeof(int) * N); cudaMalloc(&d_partial_sum, sizeof(int) * num_blocks); cudaMemcpy(d_array, array, sizeof(int) * N, cudaMemcpyHostToDevice); dim3 grid(num_blocks); dim3 block(block_dim); gpuReduction<<<grid, block, sizeof(int) * block_dim>>>(d_array, d_partial_sum, N); cudaMemcpy(partial_sum, d_partial_sum, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost); for (i = 0; i < num_blocks; i++) { sum += partial_sum[i]; } free(partial_sum); cudaFree(d_array); cudaFree(d_partial_sum); return (double)sum / N; }
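The reduction_cuda pair above ships without a driver, so here is a minimal usage sketch; the element count, the fill values, and main itself are assumptions added for illustration and are not part of the converted file pair.

// Hedged usage sketch (not part of the corpus above): calls reduction_cuda on an
// array of ones, so the returned mean should come out as 1.0.
#include <stdio.h>
#include <stdlib.h>

double reduction_cuda(int *array, int N);  /* defined in the .cu/.hip file above */

int main(void) {
    const int n = 1 << 20;                     /* hypothetical element count */
    int *data = (int *)malloc(sizeof(int) * n);
    for (int i = 0; i < n; ++i) data[i] = 1;
    printf("mean = %f\n", reduction_cuda(data, n));
    free(data);
    return 0;
}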
4cd9be8b877a4daa3a67f17d8b5a32e0bdcd9fed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu/mblas/nth_element.h" #include <iostream> namespace GPU { static void HandleError(hipError_t err, const char *file, int line ) { if (err != hipSuccess) { std::cerr << "ERROR: " << hipGetErrorString(err) << " in " << file << " at line " << line << std::endl; exit( EXIT_FAILURE ); } } #define UNROLL_MAXARG_LOOP( n, max ) \ if (tid < (n) && tid + (n) < ( max ) ) { \ if (sdata[tid + ( n ) ] > sdata[tid]) { \ sdata[tid] = sdata[tid + ( n ) ]; \ indices[tid] = indices[tid + ( n ) ]; \ } \ } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void gMaxElement(float* d_out, int* d_ind, float* d_in, int numBatches, int* batchFirstElementIdxs) { extern __shared__ float sdata[]; __shared__ int indices[512]; int tid = threadIdx.x; for (int batchIdx = 0; batchIdx < numBatches; ++batchIdx) { int begin = batchFirstElementIdxs[batchIdx]; int end = batchFirstElementIdxs[batchIdx + 1]; int i = begin + blockIdx.x * (blockDim.x * 2) + tid; sdata[tid] = -3.40282e+38f; if (i < end) { sdata[tid] = d_in[i]; indices[tid] = i; } if (i + blockDim.x < end) { float a = d_in[i]; float b = d_in[i + blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + 2 * gridDim.x * blockDim.x < end) { i += 2 * gridDim.x * blockDim.x; float a = d_in[i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < end) { float b = d_in[i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = (blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < end) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, end); UNROLL_MAXARG_LOOP(16, end); UNROLL_MAXARG_LOOP(8, end); UNROLL_MAXARG_LOOP(4, end); UNROLL_MAXARG_LOOP(2, end); UNROLL_MAXARG_LOOP(1, end); if (tid == 0) { d_out[blockIdx.x + batchIdx * gridDim.x] = sdata[0]; d_ind[blockIdx.x + batchIdx * gridDim.x] = indices[0]; } __syncthreads(); } } __global__ void gMaxElementUpdate(float* binCosts, int* binIdxs, float* probs, int *batchFirstElements, float* outCosts, int* outIdxs, int *cummulatedBeamSizes, int NUM_BLOCKS) { extern __shared__ float sdata[]; __shared__ int indices[512]; __shared__ float bestBinCost; __shared__ int bestBinCostIdx; const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int N = batchFirstElements[batchIdx + 1] - batchFirstElements[batchIdx]; int num_bins = int(N / (2 * 512)) + int(N % (2 * 512) != 0); if (num_bins > 500) { num_bins = 500; } for (int pos = cummulatedBeamSizes[batchIdx]; pos < cummulatedBeamSizes[batchIdx + 1]; ++pos) { int i = tid; sdata[tid] = -3.40282e+38f; if (i < num_bins) { sdata[tid] = binCosts[batchIdx * NUM_BLOCKS + i]; indices[tid] = i; } if (i + blockDim.x < num_bins) { float a = binCosts[batchIdx * NUM_BLOCKS + i]; float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + 2 * blockDim.x < num_bins) { i += 2 * blockDim.x; float a = binCosts[batchIdx * NUM_BLOCKS + i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < num_bins) { float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = 
(blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < num_bins) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, num_bins); UNROLL_MAXARG_LOOP(16, num_bins); UNROLL_MAXARG_LOOP(8, num_bins); UNROLL_MAXARG_LOOP(4, num_bins); UNROLL_MAXARG_LOOP(2, num_bins); UNROLL_MAXARG_LOOP(1, num_bins); if (tid == 0) { bestBinCost = sdata[0]; bestBinCostIdx = batchIdx * NUM_BLOCKS + indices[0]; probs[binIdxs[bestBinCostIdx]] = -3.40282e+38f; outIdxs[pos] = binIdxs[bestBinCostIdx]; outCosts[pos] = bestBinCost; } __syncthreads(); i = batchFirstElements[batchIdx] + (bestBinCostIdx - batchIdx * NUM_BLOCKS) * (blockDim.x * 2) + tid; const int dist = num_bins * 2 * blockDim.x; sdata[tid] = -3.40282e+38f; if (i < batchFirstElements[batchIdx + 1]) { sdata[tid] = probs[i]; indices[tid] = i; } if (i + blockDim.x < batchFirstElements[batchIdx + 1]) { float a = probs[i]; float b = probs[i+blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + dist < batchFirstElements[batchIdx + 1]) { i += dist; float a = probs[i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < batchFirstElements[batchIdx + 1]) { float b = probs[i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = (blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < batchFirstElements[batchIdx + 1]) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(16, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(8, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(4, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(2, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(1, batchFirstElements[batchIdx + 1]); if (tid == 0) { binCosts[bestBinCostIdx] = sdata[0]; binIdxs[bestBinCostIdx] = indices[0]; } __syncthreads(); } } __global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < n) { int index = indeces[tid]; d_out[tid] = d_in[index]; } } NthElement::NthElement(size_t maxBeamSize, size_t maxBatchSize, hipStream_t& stream) : stream_(stream) , NUM_BLOCKS(::min(500, int(maxBeamSize * 85000 / (2 * BLOCK_SIZE)) + int(maxBeamSize * 85000 % (2 * BLOCK_SIZE) != 0))) { //std::cerr << "NthElement::NthElement" << std::endl; HANDLE_ERROR( hipMalloc((void**)&d_ind, maxBatchSize * NUM_BLOCKS * sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**)&d_out, maxBatchSize * NUM_BLOCKS * sizeof(float)) ); HANDLE_ERROR( hipMalloc((void**)&d_res_idx, maxBatchSize * maxBeamSize * sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**)&d_res, maxBatchSize * maxBeamSize * sizeof(float)) ); HANDLE_ERROR( hipHostMalloc((void**) &h_res, maxBeamSize * maxBatchSize* sizeof(float), hipHostMallocDefault) ); HANDLE_ERROR( hipHostMalloc((void**) &h_res_idx, maxBeamSize * maxBatchSize * sizeof(int), hipHostMallocDefault) ); HANDLE_ERROR( hipMalloc((void**)&d_breakdown, maxBeamSize * sizeof(float)) ); HANDLE_ERROR( hipMalloc((void**)&d_batchPosition, (maxBatchSize + 1) * sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**)&d_cumBeamSizes, (maxBatchSize + 1) * sizeof(int)) ); } NthElement::~NthElement() { HANDLE_ERROR(hipFree(d_ind)); HANDLE_ERROR(hipFree(d_out)); HANDLE_ERROR(hipFree(d_res_idx)); 
HANDLE_ERROR(hipFree(d_res)); HANDLE_ERROR(hipHostFree(h_res)); HANDLE_ERROR(hipHostFree(h_res_idx)); HANDLE_ERROR(hipFree(d_breakdown)); HANDLE_ERROR(hipFree(d_batchPosition)); HANDLE_ERROR(hipFree(d_cumBeamSizes)); } void NthElement::getNBestList(float* probs, const std::vector<int>& batchFirstElementIdxs, const std::vector<int>& cummulatedBeamSizes) { HANDLE_ERROR( hipMemcpyAsync(d_batchPosition, batchFirstElementIdxs.data(), batchFirstElementIdxs.size() * sizeof(int), hipMemcpyHostToDevice, stream_) ); HANDLE_ERROR( hipMemcpyAsync(d_cumBeamSizes, cummulatedBeamSizes.data(), cummulatedBeamSizes.size() * sizeof(int), hipMemcpyHostToDevice, stream_) ); const int numBatches = batchFirstElementIdxs.size() - 1; hipLaunchKernelGGL(( gMaxElement), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), stream_, d_out, d_ind, probs, numBatches, d_batchPosition); hipLaunchKernelGGL(( gMaxElementUpdate), dim3(numBatches), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), stream_, d_out, d_ind, probs, d_batchPosition, d_res, d_res_idx, d_cumBeamSizes, NUM_BLOCKS); } void NthElement::getNBestList(const std::vector<size_t>& beamSizes, mblas::Matrix& Probs, std::vector<float>& outCosts, std::vector<unsigned>& outKeys, const bool isFirst) { std::vector<int> cummulatedBeamSizes(beamSizes.size() + 1, 0); std::vector<int> batchFirstElementIdxs(beamSizes.size() + 1, 0); const size_t vocabSize = Probs.Cols(); for (size_t i = 0; i < beamSizes.size(); ++i) { cummulatedBeamSizes[i + 1] = cummulatedBeamSizes[i] + beamSizes[i]; batchFirstElementIdxs[i + 1] += ((isFirst) ? (i + 1) : cummulatedBeamSizes[i + 1]) * vocabSize; } getNBestList(Probs.data(), batchFirstElementIdxs, cummulatedBeamSizes); GetPairs(cummulatedBeamSizes.back(), outKeys, outCosts); } void NthElement::GetPairs(size_t number, std::vector<unsigned>& outKeys, std::vector<float>& outValues) { HANDLE_ERROR( hipMemcpyAsync(h_res, d_res, number * sizeof(float), hipMemcpyDeviceToHost, stream_) ); HANDLE_ERROR( hipMemcpyAsync(h_res_idx, d_res_idx, number * sizeof(int), hipMemcpyDeviceToHost, stream_) ); hipStreamSynchronize(stream_); for (size_t i = 0; i < number; ++i) { outKeys.push_back(h_res_idx[i]); outValues.push_back(h_res[i]); } lastN = number; } void NthElement::getValueByKey(std::vector<float>& out, float* d_in) { hipLaunchKernelGGL(( gGetValueByKey), dim3(1), dim3(lastN), 0, stream_, d_in, d_breakdown, h_res_idx, lastN); HANDLE_ERROR( hipMemcpyAsync(out.data(), d_breakdown, lastN * sizeof(float), hipMemcpyDeviceToHost, stream_) ); hipStreamSynchronize(stream_); } } // namespace GPU
4cd9be8b877a4daa3a67f17d8b5a32e0bdcd9fed.cu
#include "gpu/mblas/nth_element.h" #include <iostream> namespace GPU { static void HandleError(cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { std::cerr << "ERROR: " << cudaGetErrorString(err) << " in " << file << " at line " << line << std::endl; exit( EXIT_FAILURE ); } } #define UNROLL_MAXARG_LOOP( n, max ) \ if (tid < (n) && tid + (n) < ( max ) ) { \ if (sdata[tid + ( n ) ] > sdata[tid]) { \ sdata[tid] = sdata[tid + ( n ) ]; \ indices[tid] = indices[tid + ( n ) ]; \ } \ } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void gMaxElement(float* d_out, int* d_ind, float* d_in, int numBatches, int* batchFirstElementIdxs) { extern __shared__ float sdata[]; __shared__ int indices[512]; int tid = threadIdx.x; for (int batchIdx = 0; batchIdx < numBatches; ++batchIdx) { int begin = batchFirstElementIdxs[batchIdx]; int end = batchFirstElementIdxs[batchIdx + 1]; int i = begin + blockIdx.x * (blockDim.x * 2) + tid; sdata[tid] = -3.40282e+38f; if (i < end) { sdata[tid] = d_in[i]; indices[tid] = i; } if (i + blockDim.x < end) { float a = d_in[i]; float b = d_in[i + blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + 2 * gridDim.x * blockDim.x < end) { i += 2 * gridDim.x * blockDim.x; float a = d_in[i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < end) { float b = d_in[i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = (blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < end) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, end); UNROLL_MAXARG_LOOP(16, end); UNROLL_MAXARG_LOOP(8, end); UNROLL_MAXARG_LOOP(4, end); UNROLL_MAXARG_LOOP(2, end); UNROLL_MAXARG_LOOP(1, end); if (tid == 0) { d_out[blockIdx.x + batchIdx * gridDim.x] = sdata[0]; d_ind[blockIdx.x + batchIdx * gridDim.x] = indices[0]; } __syncthreads(); } } __global__ void gMaxElementUpdate(float* binCosts, int* binIdxs, float* probs, int *batchFirstElements, float* outCosts, int* outIdxs, int *cummulatedBeamSizes, int NUM_BLOCKS) { extern __shared__ float sdata[]; __shared__ int indices[512]; __shared__ float bestBinCost; __shared__ int bestBinCostIdx; const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int N = batchFirstElements[batchIdx + 1] - batchFirstElements[batchIdx]; int num_bins = int(N / (2 * 512)) + int(N % (2 * 512) != 0); if (num_bins > 500) { num_bins = 500; } for (int pos = cummulatedBeamSizes[batchIdx]; pos < cummulatedBeamSizes[batchIdx + 1]; ++pos) { int i = tid; sdata[tid] = -3.40282e+38f; if (i < num_bins) { sdata[tid] = binCosts[batchIdx * NUM_BLOCKS + i]; indices[tid] = i; } if (i + blockDim.x < num_bins) { float a = binCosts[batchIdx * NUM_BLOCKS + i]; float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + 2 * blockDim.x < num_bins) { i += 2 * blockDim.x; float a = binCosts[batchIdx * NUM_BLOCKS + i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < num_bins) { float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = (blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < num_bins) { if (sdata[tid + s] 
> sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, num_bins); UNROLL_MAXARG_LOOP(16, num_bins); UNROLL_MAXARG_LOOP(8, num_bins); UNROLL_MAXARG_LOOP(4, num_bins); UNROLL_MAXARG_LOOP(2, num_bins); UNROLL_MAXARG_LOOP(1, num_bins); if (tid == 0) { bestBinCost = sdata[0]; bestBinCostIdx = batchIdx * NUM_BLOCKS + indices[0]; probs[binIdxs[bestBinCostIdx]] = -3.40282e+38f; outIdxs[pos] = binIdxs[bestBinCostIdx]; outCosts[pos] = bestBinCost; } __syncthreads(); i = batchFirstElements[batchIdx] + (bestBinCostIdx - batchIdx * NUM_BLOCKS) * (blockDim.x * 2) + tid; const int dist = num_bins * 2 * blockDim.x; sdata[tid] = -3.40282e+38f; if (i < batchFirstElements[batchIdx + 1]) { sdata[tid] = probs[i]; indices[tid] = i; } if (i + blockDim.x < batchFirstElements[batchIdx + 1]) { float a = probs[i]; float b = probs[i+blockDim.x]; if (a > b) { sdata[tid] = a; indices[tid] = i; } else { sdata[tid] = b; indices[tid] = i + blockDim.x; } } while (i + dist < batchFirstElements[batchIdx + 1]) { i += dist; float a = probs[i]; if (a > sdata[tid]) { sdata[tid] = a; indices[tid] = i; } if (i + blockDim.x < batchFirstElements[batchIdx + 1]) { float b = probs[i + blockDim.x]; if (b > sdata[tid]) { sdata[tid] = b; indices[tid] = i + blockDim.x; } } } __syncthreads(); for (int s = (blockDim.x >> 1); s > 32; s >>= 1) { if (tid < s && tid + s < batchFirstElements[batchIdx + 1]) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } UNROLL_MAXARG_LOOP(32, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(16, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(8, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(4, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(2, batchFirstElements[batchIdx + 1]); UNROLL_MAXARG_LOOP(1, batchFirstElements[batchIdx + 1]); if (tid == 0) { binCosts[bestBinCostIdx] = sdata[0]; binIdxs[bestBinCostIdx] = indices[0]; } __syncthreads(); } } __global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < n) { int index = indeces[tid]; d_out[tid] = d_in[index]; } } NthElement::NthElement(size_t maxBeamSize, size_t maxBatchSize, cudaStream_t& stream) : stream_(stream) , NUM_BLOCKS(std::min(500, int(maxBeamSize * 85000 / (2 * BLOCK_SIZE)) + int(maxBeamSize * 85000 % (2 * BLOCK_SIZE) != 0))) { //std::cerr << "NthElement::NthElement" << std::endl; HANDLE_ERROR( cudaMalloc((void**)&d_ind, maxBatchSize * NUM_BLOCKS * sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**)&d_out, maxBatchSize * NUM_BLOCKS * sizeof(float)) ); HANDLE_ERROR( cudaMalloc((void**)&d_res_idx, maxBatchSize * maxBeamSize * sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**)&d_res, maxBatchSize * maxBeamSize * sizeof(float)) ); HANDLE_ERROR( cudaHostAlloc((void**) &h_res, maxBeamSize * maxBatchSize* sizeof(float), cudaHostAllocDefault) ); HANDLE_ERROR( cudaHostAlloc((void**) &h_res_idx, maxBeamSize * maxBatchSize * sizeof(int), cudaHostAllocDefault) ); HANDLE_ERROR( cudaMalloc((void**)&d_breakdown, maxBeamSize * sizeof(float)) ); HANDLE_ERROR( cudaMalloc((void**)&d_batchPosition, (maxBatchSize + 1) * sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**)&d_cumBeamSizes, (maxBatchSize + 1) * sizeof(int)) ); } NthElement::~NthElement() { HANDLE_ERROR(cudaFree(d_ind)); HANDLE_ERROR(cudaFree(d_out)); HANDLE_ERROR(cudaFree(d_res_idx)); HANDLE_ERROR(cudaFree(d_res)); HANDLE_ERROR(cudaFreeHost(h_res)); 
HANDLE_ERROR(cudaFreeHost(h_res_idx)); HANDLE_ERROR(cudaFree(d_breakdown)); HANDLE_ERROR(cudaFree(d_batchPosition)); HANDLE_ERROR(cudaFree(d_cumBeamSizes)); } void NthElement::getNBestList(float* probs, const std::vector<int>& batchFirstElementIdxs, const std::vector<int>& cummulatedBeamSizes) { HANDLE_ERROR( cudaMemcpyAsync(d_batchPosition, batchFirstElementIdxs.data(), batchFirstElementIdxs.size() * sizeof(int), cudaMemcpyHostToDevice, stream_) ); HANDLE_ERROR( cudaMemcpyAsync(d_cumBeamSizes, cummulatedBeamSizes.data(), cummulatedBeamSizes.size() * sizeof(int), cudaMemcpyHostToDevice, stream_) ); const int numBatches = batchFirstElementIdxs.size() - 1; gMaxElement<<<NUM_BLOCKS, BLOCK_SIZE, BLOCK_SIZE * sizeof(float), stream_>>> (d_out, d_ind, probs, numBatches, d_batchPosition); gMaxElementUpdate<<<numBatches, BLOCK_SIZE, BLOCK_SIZE * sizeof(float), stream_>>> (d_out, d_ind, probs, d_batchPosition, d_res, d_res_idx, d_cumBeamSizes, NUM_BLOCKS); } void NthElement::getNBestList(const std::vector<size_t>& beamSizes, mblas::Matrix& Probs, std::vector<float>& outCosts, std::vector<unsigned>& outKeys, const bool isFirst) { std::vector<int> cummulatedBeamSizes(beamSizes.size() + 1, 0); std::vector<int> batchFirstElementIdxs(beamSizes.size() + 1, 0); const size_t vocabSize = Probs.Cols(); for (size_t i = 0; i < beamSizes.size(); ++i) { cummulatedBeamSizes[i + 1] = cummulatedBeamSizes[i] + beamSizes[i]; batchFirstElementIdxs[i + 1] += ((isFirst) ? (i + 1) : cummulatedBeamSizes[i + 1]) * vocabSize; } getNBestList(Probs.data(), batchFirstElementIdxs, cummulatedBeamSizes); GetPairs(cummulatedBeamSizes.back(), outKeys, outCosts); } void NthElement::GetPairs(size_t number, std::vector<unsigned>& outKeys, std::vector<float>& outValues) { HANDLE_ERROR( cudaMemcpyAsync(h_res, d_res, number * sizeof(float), cudaMemcpyDeviceToHost, stream_) ); HANDLE_ERROR( cudaMemcpyAsync(h_res_idx, d_res_idx, number * sizeof(int), cudaMemcpyDeviceToHost, stream_) ); cudaStreamSynchronize(stream_); for (size_t i = 0; i < number; ++i) { outKeys.push_back(h_res_idx[i]); outValues.push_back(h_res[i]); } lastN = number; } void NthElement::getValueByKey(std::vector<float>& out, float* d_in) { gGetValueByKey<<<1, lastN, 0, stream_>>> (d_in, d_breakdown, h_res_idx, lastN); HANDLE_ERROR( cudaMemcpyAsync(out.data(), d_breakdown, lastN * sizeof(float), cudaMemcpyDeviceToHost, stream_) ); cudaStreamSynchronize(stream_); } } // namespace GPU
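For readers tracing gMaxElement and gMaxElementUpdate, the shared-memory arg-max pattern they both rely on is shown in isolation below. This is a simplified sketch, not code from nth_element.cu: the block size is a fixed assumed constant, and full __syncthreads() replaces the warp-unrolled UNROLL_MAXARG_LOOP tail used in the original kernels.

// Hedged illustration: block-level arg-max by tree reduction in shared memory.
#include <cfloat>

#define BLOCK 256  /* assumed power-of-two block size */

__global__ void blockArgMax(const float* in, int n, float* outVal, int* outIdx) {
    __shared__ float vals[BLOCK];
    __shared__ int   idxs[BLOCK];
    const int tid = threadIdx.x;
    // Each thread strides over the input and keeps its local best.
    float best = -FLT_MAX; int bestIdx = 0;
    for (int i = tid; i < n; i += BLOCK) {
        if (in[i] > best) { best = in[i]; bestIdx = i; }
    }
    vals[tid] = best; idxs[tid] = bestIdx;
    __syncthreads();
    // Tree reduction: halve the number of active threads each step.
    for (int s = BLOCK >> 1; s > 0; s >>= 1) {
        if (tid < s && vals[tid + s] > vals[tid]) {
            vals[tid] = vals[tid + s];
            idxs[tid] = idxs[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) { *outVal = vals[0]; *outIdx = idxs[0]; }
}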
3edac09394ef3656784f2f47a76dbf49a22f2862.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_lists.hpp> #include <cudf/concatenate.hpp> #include <cudf/io/data_sink.hpp> #include <cudf/io/functions.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <fstream> #include <type_traits> namespace cudf_io = cudf::io; template <typename T> using column_wrapper = typename std::conditional<std::is_same<T, cudf::string_view>::value, cudf::test::strings_column_wrapper, cudf::test::fixed_width_column_wrapper<T>>::type; using column = cudf::column; using table = cudf::table; using table_view = cudf::table_view; // Global environment for temporary files auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>( ::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment)); template <typename T, typename Elements> std::unique_ptr<cudf::table> create_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity, Elements elements) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<cudf::test::fixed_width_column_wrapper<T>> src_cols(num_columns); for (int idx = 0; idx < num_columns; idx++) { if (include_validity) { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows, valids); } else { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows); } } std::vector<std::unique_ptr<cudf::column>> columns(num_columns); std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<T>& in) { auto ret = in.release(); ret->has_nulls(); return ret; }); return std::make_unique<cudf::table>(std::move(columns)); } template <typename T> std::unique_ptr<cudf::table> create_random_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity) { auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](T i) { return rand(); }); return create_fixed_table<T>(num_columns, num_rows, include_validity, rand_elements); } template <typename T> std::unique_ptr<cudf::table> create_compressible_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, cudf::size_type period, bool include_validity) { auto compressible_elements = cudf::test::make_counting_transform_iterator(0, [period](T i) { return i / period; }); return create_fixed_table<T>(num_columns, num_rows, include_validity, compressible_elements); } // Base test fixture for tests struct ParquetWriterTest : public cudf::test::BaseFixture { }; // Base test fixture for "stress" tests struct ParquetWriterStressTest : public cudf::test::BaseFixture { }; // Typed test fixture for numeric type tests template <typename T> struct ParquetWriterNumericTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Typed test fixture for timestamp type tests template <typename T> struct ParquetWriterTimestampTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetWriterNumericTypeTest, cudf::test::NumericTypes); using SupportedTimestampTypes = cudf::test::TimestampTypes; TYPED_TEST_CASE(ParquetWriterTimestampTypeTest, SupportedTimestampTypes); // Base test fixture for chunked writer tests struct ParquetChunkedWriterTest : public cudf::test::BaseFixture { }; // Typed test fixture for numeric type tests template <typename T> struct ParquetChunkedWriterNumericTypeTest : public ParquetChunkedWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetChunkedWriterNumericTypeTest, cudf::test::NumericTypes); namespace { // Generates a vector of uniform random values of type T template <typename T> inline auto random_values(size_t size) { std::vector<T> values(size); using T1 = T; using uniform_distribution = typename std::conditional_t<std::is_same<T1, bool>::value, std::bernoulli_distribution, std::conditional_t<std::is_floating_point<T1>::value, std::uniform_real_distribution<T1>, std::uniform_int_distribution<T1>>>; static constexpr auto seed = 0xf00d; static std::mt19937 engine{seed}; static uniform_distribution dist{}; std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; }); return values; } // Helper function to compare two tables void expect_tables_equal(cudf::table_view const& lhs, cudf::table_view const& rhs) { EXPECT_EQ(lhs.num_columns(), rhs.num_columns()); auto expected = lhs.begin(); auto result = rhs.begin(); while (result != 
rhs.end()) { cudf::test::expect_columns_equal(*expected++, *result++); } } } // namespace TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumn) { auto sequence = cudf::test::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumnWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i % 2); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, Timestamps) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Timestamps.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampsWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i > 30) && (i < 60); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("TimestampsWithNulls.parquet"); cudf_io::write_parquet_args 
out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TEST_F(ParquetWriterTest, MultiColumn) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), validity}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; // expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumn.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiColumnWithNulls) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); // auto col0_mask = cudf::test::make_counting_transform_iterator( // 0, [](auto i) { return (i % 2); }); auto col1_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i < 10); }); auto col2_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); auto col3_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i == (num_rows - 1)); }); auto col4_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i >= 40 || i <= 60); }); auto col5_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i > 80); }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), col0_mask}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), 
col1_mask}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), col2_mask}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), col3_mask}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), col4_mask}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), col5_mask}; cudf_io::table_metadata expected_metadata; // expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, Strings) { std::vector<const char*> strings{ "Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"}; const auto num_rows = strings.size(); auto seq_col0 = random_values<int>(num_rows); auto seq_col2 = random_values<float>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity}; column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()}; column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); expected_metadata.column_names.emplace_back("col_string"); expected_metadata.column_names.emplace_back("col_another"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(3, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Strings.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiIndex) { constexpr auto num_rows = 100; auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), 
validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); expected_metadata.user_data.insert( {"pandas", "\"index_columns\": [\"floats\", \"doubles\"], \"column1\": [\"int8s\"]"}); std::vector<std::unique_ptr<column>> cols; cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiIndex.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.use_pandas_metadata = true; in_args.columns = {"int8s", "int16s", "int32s"}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, HostBuffer) { constexpr auto num_rows = 100 << 10; const auto seq_col = random_values<int>(num_rows); const auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int> col{seq_col.begin(), seq_col.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); const auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); std::vector<char> out_buffer; cudf_io::write_parquet_args out_args{ cudf_io::sink_info(&out_buffer), expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info(out_buffer.data(), out_buffer.size())}; const auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, NonNullable) { srand(31337); auto expected = create_random_fixed_table<int>(9, 9, false); auto filepath = temp_env->get_temp_filepath("NonNullable.parquet"); cudf_io::write_parquet_args args{cudf_io::sink_info{filepath}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom data sink that supports device writes. uses plain file io. 
class custom_test_data_sink : public cudf::io::data_sink { public: explicit custom_test_data_sink(std::string const& filepath) { outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } virtual ~custom_test_data_sink() { flush(); } void host_write(void const* data, size_t size) override { outfile_.write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return true; } void device_write(void const* gpu_data, size_t size, hipStream_t stream) { char* ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); CUDA_TRY(hipMemcpyAsync(ptr, gpu_data, size, hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char const*>(ptr), size); CUDA_TRY(hipHostFree(ptr)); } void flush() override { outfile_.flush(); } size_t bytes_written() override { return outfile_.tellp(); } private: std::ofstream outfile_; }; TEST_F(ParquetWriterTest, CustomDataSink) { auto filepath = temp_env->get_temp_filepath("CustomDataSink.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::io; srand(31337); auto expected = create_random_fixed_table<int>(5, 10, false); // write out using the custom sink { cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); } // write out using a memmapped sink std::vector<char> buf_sink; { cudf_io::write_parquet_args args{cudf_io::sink_info{&buf_sink}, *expected}; cudf_io::write_parquet(args); } // read them back in and make sure everything matches cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); cudf_io::read_parquet_args buf_args{cudf_io::source_info{buf_sink.data(), buf_sink.size()}}; auto buf_tbl = cudf_io::read_parquet(buf_args); expect_tables_equal(buf_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterTest, DeviceWriteLargeishFile) { auto filepath = temp_env->get_temp_filepath("DeviceWriteLargeishFile.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(4, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetChunkedWriterTest, SingleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedSingle.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *table1); } TEST_F(ParquetChunkedWriterTest, SimpleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::concatenate({*table1, *table2}); auto filepath = 
temp_env->get_temp_filepath("ChunkedSimple.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, LargeTables) { srand(31337); auto table1 = create_random_fixed_table<int>(512, 4096, true); auto table2 = create_random_fixed_table<int>(512, 8192, true); auto full_table = cudf::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedLarge.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ManyTables) { srand(31337); std::vector<std::unique_ptr<table>> tables; std::vector<table_view> table_views; constexpr int num_tables = 96; for (int idx = 0; idx < num_tables; idx++) { auto tbl = create_random_fixed_table<int>(16, 64, true); table_views.push_back(*tbl); tables.push_back(std::move(tbl)); } auto expected = cudf::concatenate(table_views); auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); std::for_each(table_views.begin(), table_views.end(), [&state](table_view const& tbl) { cudf_io::write_parquet_chunked(tbl, state); }); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, Strings) { std::vector<std::unique_ptr<cudf::column>> cols; bool mask1[] = {1, 1, 0, 1, 1, 1, 1}; std::vector<const char*> h_strings1{"four", "score", "and", "seven", "years", "ago", "abcdefgh"}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), mask1); cols.push_back(strings1.release()); cudf::table tbl1(std::move(cols)); bool mask2[] = {0, 1, 1, 1, 1, 1, 1}; std::vector<const char*> h_strings2{"ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), mask2); cols.push_back(strings2.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedStrings.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, MismatchedTypes) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = 
create_random_fixed_table<float>(4, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, MismatchedStructure) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(3, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, ReadRowGroups) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::concatenate({*table2, *table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedRowGroups.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; read_args.row_group_list = {1, 0, 1}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ReadRowGroupsError) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedRowGroupsError.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; read_args.row_group_list = {0, 1}; EXPECT_THROW(cudf_io::read_parquet(read_args), cudf::logic_error); read_args.row_group_list = {-1}; EXPECT_THROW(cudf_io::read_parquet(read_args), cudf::logic_error); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize) { // write out two 31 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 31; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; T c1a[] = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; T c1b[] = {6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::table tbl1(std::move(cols)); T c2a[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}; T c2b[] = {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); 
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize2) { // write out two 33 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 33; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; T c1a[] = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; T c1b[] = {6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::table tbl1(std::move(cols)); T c2a[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}; T c2b[] = {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom mem mapped data sink that supports device writes template <bool supports_device_writes> class custom_test_memmap_sink : public cudf::io::data_sink { public: explicit custom_test_memmap_sink(std::vector<char>* mm_writer_buf) { mm_writer = cudf::io::data_sink::create(mm_writer_buf); } virtual ~custom_test_memmap_sink() { mm_writer->flush(); } void host_write(void const* data, size_t size) override { mm_writer->host_write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return supports_device_writes; } void device_write(void const* gpu_data, size_t size, hipStream_t stream) { char* ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); CUDA_TRY(hipMemcpyAsync(ptr, gpu_data, size, hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); mm_writer->host_write(reinterpret_cast<char const*>(ptr), size); CUDA_TRY(hipHostFree(ptr)); } void flush() override { mm_writer->flush(); } size_t bytes_written() override { return mm_writer->bytes_written(); } private: std::unique_ptr<data_sink> 
mm_writer; }; TEST_F(ParquetWriterStressTest, LargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = 
cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } CUDF_TEST_PROGRAM_MAIN()
3edac09394ef3656784f2f47a76dbf49a22f2862.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_lists.hpp> #include <cudf/concatenate.hpp> #include <cudf/io/data_sink.hpp> #include <cudf/io/functions.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <fstream> #include <type_traits> namespace cudf_io = cudf::io; template <typename T> using column_wrapper = typename std::conditional<std::is_same<T, cudf::string_view>::value, cudf::test::strings_column_wrapper, cudf::test::fixed_width_column_wrapper<T>>::type; using column = cudf::column; using table = cudf::table; using table_view = cudf::table_view; // Global environment for temporary files auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>( ::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment)); template <typename T, typename Elements> std::unique_ptr<cudf::table> create_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity, Elements elements) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<cudf::test::fixed_width_column_wrapper<T>> src_cols(num_columns); for (int idx = 0; idx < num_columns; idx++) { if (include_validity) { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows, valids); } else { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows); } } std::vector<std::unique_ptr<cudf::column>> columns(num_columns); std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<T>& in) { auto ret = in.release(); ret->has_nulls(); return ret; }); return std::make_unique<cudf::table>(std::move(columns)); } template <typename T> std::unique_ptr<cudf::table> create_random_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity) { auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](T i) { return rand(); }); return create_fixed_table<T>(num_columns, num_rows, include_validity, rand_elements); } template <typename T> std::unique_ptr<cudf::table> create_compressible_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, cudf::size_type period, bool include_validity) { auto compressible_elements = cudf::test::make_counting_transform_iterator(0, [period](T i) { return i / period; }); return create_fixed_table<T>(num_columns, num_rows, include_validity, compressible_elements); } // Base test fixture for tests struct ParquetWriterTest : public cudf::test::BaseFixture { }; // Base test fixture for "stress" tests struct ParquetWriterStressTest : public cudf::test::BaseFixture { }; // Typed test fixture for numeric type tests template <typename T> struct ParquetWriterNumericTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Typed test fixture for timestamp type tests template <typename T> struct ParquetWriterTimestampTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetWriterNumericTypeTest, cudf::test::NumericTypes); using SupportedTimestampTypes = cudf::test::TimestampTypes; TYPED_TEST_CASE(ParquetWriterTimestampTypeTest, SupportedTimestampTypes); // Base test fixture for chunked writer tests struct ParquetChunkedWriterTest : public cudf::test::BaseFixture { }; // Typed test fixture for numeric type tests template <typename T> struct ParquetChunkedWriterNumericTypeTest : public ParquetChunkedWriterTest { auto type() { return cudf::data_type{cudf::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetChunkedWriterNumericTypeTest, cudf::test::NumericTypes); namespace { // Generates a vector of uniform random values of type T template <typename T> inline auto random_values(size_t size) { std::vector<T> values(size); using T1 = T; using uniform_distribution = typename std::conditional_t<std::is_same<T1, bool>::value, std::bernoulli_distribution, std::conditional_t<std::is_floating_point<T1>::value, std::uniform_real_distribution<T1>, std::uniform_int_distribution<T1>>>; static constexpr auto seed = 0xf00d; static std::mt19937 engine{seed}; static uniform_distribution dist{}; std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; }); return values; } // Helper function to compare two tables void expect_tables_equal(cudf::table_view const& lhs, cudf::table_view const& rhs) { EXPECT_EQ(lhs.num_columns(), rhs.num_columns()); auto expected = lhs.begin(); auto result = rhs.begin(); while (result != 
rhs.end()) { cudf::test::expect_columns_equal(*expected++, *result++); } } } // namespace TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumn) { auto sequence = cudf::test::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumnWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i % 2); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, Timestamps) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Timestamps.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampsWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i > 30) && (i < 60); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("TimestampsWithNulls.parquet"); cudf_io::write_parquet_args 
out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TEST_F(ParquetWriterTest, MultiColumn) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), validity}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; // expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumn.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiColumnWithNulls) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); // auto col0_mask = cudf::test::make_counting_transform_iterator( // 0, [](auto i) { return (i % 2); }); auto col1_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i < 10); }); auto col2_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); auto col3_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i == (num_rows - 1)); }); auto col4_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i >= 40 || i <= 60); }); auto col5_mask = cudf::test::make_counting_transform_iterator(0, [](auto i) { return (i > 80); }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), col0_mask}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), 
col1_mask}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), col2_mask}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), col3_mask}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), col4_mask}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), col5_mask}; cudf_io::table_metadata expected_metadata; // expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, Strings) { std::vector<const char*> strings{ "Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"}; const auto num_rows = strings.size(); auto seq_col0 = random_values<int>(num_rows); auto seq_col2 = random_values<float>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity}; column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()}; column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); expected_metadata.column_names.emplace_back("col_string"); expected_metadata.column_names.emplace_back("col_another"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(3, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Strings.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiIndex) { constexpr auto num_rows = 100; auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), 
validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); expected_metadata.user_data.insert( {"pandas", "\"index_columns\": [\"floats\", \"doubles\"], \"column1\": [\"int8s\"]"}); std::vector<std::unique_ptr<column>> cols; cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiIndex.parquet"); cudf_io::write_parquet_args out_args{ cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.use_pandas_metadata = true; in_args.columns = {"int8s", "int16s", "int32s"}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, HostBuffer) { constexpr auto num_rows = 100 << 10; const auto seq_col = random_values<int>(num_rows); const auto validity = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); column_wrapper<int> col{seq_col.begin(), seq_col.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); const auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); std::vector<char> out_buffer; cudf_io::write_parquet_args out_args{ cudf_io::sink_info(&out_buffer), expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info(out_buffer.data(), out_buffer.size())}; const auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, NonNullable) { srand(31337); auto expected = create_random_fixed_table<int>(9, 9, false); auto filepath = temp_env->get_temp_filepath("NonNullable.parquet"); cudf_io::write_parquet_args args{cudf_io::sink_info{filepath}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom data sink that supports device writes. uses plain file io. 
class custom_test_data_sink : public cudf::io::data_sink { public: explicit custom_test_data_sink(std::string const& filepath) { outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } virtual ~custom_test_data_sink() { flush(); } void host_write(void const* data, size_t size) override { outfile_.write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return true; } void device_write(void const* gpu_data, size_t size, cudaStream_t stream) { char* ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char const*>(ptr), size); CUDA_TRY(cudaFreeHost(ptr)); } void flush() override { outfile_.flush(); } size_t bytes_written() override { return outfile_.tellp(); } private: std::ofstream outfile_; }; TEST_F(ParquetWriterTest, CustomDataSink) { auto filepath = temp_env->get_temp_filepath("CustomDataSink.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::io; srand(31337); auto expected = create_random_fixed_table<int>(5, 10, false); // write out using the custom sink { cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); } // write out using a memmapped sink std::vector<char> buf_sink; { cudf_io::write_parquet_args args{cudf_io::sink_info{&buf_sink}, *expected}; cudf_io::write_parquet(args); } // read them back in and make sure everything matches cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); cudf_io::read_parquet_args buf_args{cudf_io::source_info{buf_sink.data(), buf_sink.size()}}; auto buf_tbl = cudf_io::read_parquet(buf_args); expect_tables_equal(buf_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterTest, DeviceWriteLargeishFile) { auto filepath = temp_env->get_temp_filepath("DeviceWriteLargeishFile.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(4, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetChunkedWriterTest, SingleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedSingle.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *table1); } TEST_F(ParquetChunkedWriterTest, SimpleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::concatenate({*table1, *table2}); auto filepath = 
temp_env->get_temp_filepath("ChunkedSimple.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, LargeTables) { srand(31337); auto table1 = create_random_fixed_table<int>(512, 4096, true); auto table2 = create_random_fixed_table<int>(512, 8192, true); auto full_table = cudf::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedLarge.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ManyTables) { srand(31337); std::vector<std::unique_ptr<table>> tables; std::vector<table_view> table_views; constexpr int num_tables = 96; for (int idx = 0; idx < num_tables; idx++) { auto tbl = create_random_fixed_table<int>(16, 64, true); table_views.push_back(*tbl); tables.push_back(std::move(tbl)); } auto expected = cudf::concatenate(table_views); auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); std::for_each(table_views.begin(), table_views.end(), [&state](table_view const& tbl) { cudf_io::write_parquet_chunked(tbl, state); }); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, Strings) { std::vector<std::unique_ptr<cudf::column>> cols; bool mask1[] = {1, 1, 0, 1, 1, 1, 1}; std::vector<const char*> h_strings1{"four", "score", "and", "seven", "years", "ago", "abcdefgh"}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), mask1); cols.push_back(strings1.release()); cudf::table tbl1(std::move(cols)); bool mask2[] = {0, 1, 1, 1, 1, 1, 1}; std::vector<const char*> h_strings2{"ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), mask2); cols.push_back(strings2.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedStrings.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, MismatchedTypes) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = 
create_random_fixed_table<float>(4, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, MismatchedStructure) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(3, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, ReadRowGroups) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::concatenate({*table2, *table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedRowGroups.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; read_args.row_group_list = {1, 0, 1}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ReadRowGroupsError) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedRowGroupsError.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; read_args.row_group_list = {0, 1}; EXPECT_THROW(cudf_io::read_parquet(read_args), cudf::logic_error); read_args.row_group_list = {-1}; EXPECT_THROW(cudf_io::read_parquet(read_args), cudf::logic_error); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize) { // write out two 31 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 31; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; T c1a[] = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; T c1b[] = {6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::table tbl1(std::move(cols)); T c2a[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}; T c2b[] = {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); 
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize2) { // write out two 33 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 33; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; T c1a[] = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; T c1b[] = {6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::table tbl1(std::move(cols)); T c2a[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}; T c2b[] = {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::table tbl2(std::move(cols)); auto expected = cudf::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom mem mapped data sink that supports device writes template <bool supports_device_writes> class custom_test_memmap_sink : public cudf::io::data_sink { public: explicit custom_test_memmap_sink(std::vector<char>* mm_writer_buf) { mm_writer = cudf::io::data_sink::create(mm_writer_buf); } virtual ~custom_test_memmap_sink() { mm_writer->flush(); } void host_write(void const* data, size_t size) override { mm_writer->host_write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return supports_device_writes; } void device_write(void const* gpu_data, size_t size, cudaStream_t stream) { char* ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); mm_writer->host_write(reinterpret_cast<char const*>(ptr), size); CUDA_TRY(cudaFreeHost(ptr)); } void flush() override { mm_writer->flush(); } size_t bytes_written() override { return mm_writer->bytes_written(); } private: 
std::unique_ptr<data_sink> mm_writer; }; TEST_F(ParquetWriterStressTest, LargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto 
custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } CUDF_TEST_PROGRAM_MAIN()
5b205cf195e9e026b8953a88f9b147b28494c766.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "chamo.cuh"
#define BLOCKSIZE 32
__global__ void updataCV(cvInput input){
    int u = blockIdx.x*blockDim.x + threadIdx.x;
    int v = blockIdx.y*blockDim.y + threadIdx.y;
    if (u < input.width && v < input.height){
        int pt = v*input.width + u;
        float u_ref_t = input.transMat[0] * u + input.transMat[1] * v + input.transMat[2];
        float v_ref_t = input.transMat[4] * u + input.transMat[5] * v + input.transMat[6];
        float d_ref_t = input.transMat[8] * u + input.transMat[9] * v + input.transMat[10];
        float lowestErr = -1;
        float lowestErrInd = -1;
        float lowestD = -1;
        int steps = 0;
        for (float d = input.farZ; d < input.nearZ; d = d + input.zStep){
            float u_ref = u_ref_t + input.transMat[3] * d;
            float v_ref = v_ref_t + input.transMat[7] * d;
            float d_ref = d_ref_t + input.transMat[11] * d;
            u_ref = u_ref / d_ref;
            v_ref = v_ref / d_ref;
            float4 ref_val = tex2D<float4>(input.refImg, u_ref, v_ref);
            float v1 = fabsf(ref_val.x - input.baseImg[pt * 4]);
            float v2 = fabsf(ref_val.y - input.baseImg[pt * 4 + 1]);
            float v3 = fabsf(ref_val.z - input.baseImg[pt * 4 + 2]);
            //float err = fabsf(ref_val - input.baseImg[pt]);
            float oldErr = input.cvData[pt*input.stepCount + steps];
            float err = oldErr*(1 - input.weightPerImg) + (v1 + v2 + v3)*input.weightPerImg;
            //err = oldErr*(1-input.weightPerImg) + err*input.weightPerImg;
            input.cvData[pt*input.stepCount + steps] = err;
            if (lowestErr == -1){
                lowestErr = err;
                lowestErrInd = steps;
                lowestD = d;
            }
            else{
                if (lowestErr > err){
                    lowestErr = err;
                    lowestErrInd = steps;
                    lowestD = d;
                }
            }
            steps++;
        }
        input.lowValue[pt] = lowestD;
        input.lowInd[pt] = lowestErrInd;
    }
}
void updataCVCaller(cvInput input){
    dim3 blockDim(BLOCKSIZE,BLOCKSIZE);
    dim3 girdDim((input.width + blockDim.x - 1) / (blockDim.x), (input.height + blockDim.y - 1) / (blockDim.y));
    updataCV << <girdDim, blockDim >> >(input);
}
5b205cf195e9e026b8953a88f9b147b28494c766.cu
#include <cuda_runtime.h>
#include "chamo.cuh"
#define BLOCKSIZE 32
__global__ void updataCV(cvInput input){
    int u = blockIdx.x*blockDim.x + threadIdx.x;
    int v = blockIdx.y*blockDim.y + threadIdx.y;
    if (u < input.width && v < input.height){
        int pt = v*input.width + u;
        float u_ref_t = input.transMat[0] * u + input.transMat[1] * v + input.transMat[2];
        float v_ref_t = input.transMat[4] * u + input.transMat[5] * v + input.transMat[6];
        float d_ref_t = input.transMat[8] * u + input.transMat[9] * v + input.transMat[10];
        float lowestErr = -1;
        float lowestErrInd = -1;
        float lowestD = -1;
        int steps = 0;
        for (float d = input.farZ; d < input.nearZ; d = d + input.zStep){
            float u_ref = u_ref_t + input.transMat[3] * d;
            float v_ref = v_ref_t + input.transMat[7] * d;
            float d_ref = d_ref_t + input.transMat[11] * d;
            u_ref = u_ref / d_ref;
            v_ref = v_ref / d_ref;
            float4 ref_val = tex2D<float4>(input.refImg, u_ref, v_ref);
            float v1 = fabsf(ref_val.x - input.baseImg[pt * 4]);
            float v2 = fabsf(ref_val.y - input.baseImg[pt * 4 + 1]);
            float v3 = fabsf(ref_val.z - input.baseImg[pt * 4 + 2]);
            //float err = fabsf(ref_val - input.baseImg[pt]);
            float oldErr = input.cvData[pt*input.stepCount + steps];
            float err = oldErr*(1 - input.weightPerImg) + (v1 + v2 + v3)*input.weightPerImg;
            //err = oldErr*(1-input.weightPerImg) + err*input.weightPerImg;
            input.cvData[pt*input.stepCount + steps] = err;
            if (lowestErr == -1){
                lowestErr = err;
                lowestErrInd = steps;
                lowestD = d;
            }
            else{
                if (lowestErr > err){
                    lowestErr = err;
                    lowestErrInd = steps;
                    lowestD = d;
                }
            }
            steps++;
        }
        input.lowValue[pt] = lowestD;
        input.lowInd[pt] = lowestErrInd;
    }
}
void updataCVCaller(cvInput input){
    dim3 blockDim(BLOCKSIZE,BLOCKSIZE);
    dim3 girdDim((input.width + blockDim.x - 1) / (blockDim.x), (input.height + blockDim.y - 1) / (blockDim.y));
    updataCV << <girdDim, blockDim >> >(input);
}
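// --------------------------------------------------------------------------
// Illustrative host-side driver for the cost-volume kernel above (a sketch,
// not part of the original sources). The real cvInput struct is defined in
// chamo.cuh, which is not included in this dump, so the mirror struct below
// only lists the fields the kernel actually reads; its field names, types and
// layout are assumptions inferred from the kernel body.
#include <cuda_runtime.h>

struct cvInputSketch {                // hypothetical stand-in for cvInput
    int width, height, stepCount;
    float nearZ, farZ, zStep, weightPerImg;
    float *transMat;                  // 3x4 base->reference reprojection, row-major, on device
    cudaTextureObject_t refImg;       // reference image sampled as float4 texels
    float *baseImg;                   // base image, 4 floats (RGBA) per pixel, on device
    float *cvData;                    // cost volume, width*height*stepCount floats
    float *lowValue;                  // per-pixel depth of the lowest cost
    float *lowInd;                    // per-pixel step index of the lowest cost
};

int main() {
    const int W = 640, H = 480, steps = 64;

    cvInputSketch in{};
    in.width = W;  in.height = H;
    in.farZ = 0.5f; in.nearZ = 10.0f;
    in.stepCount = steps;
    in.zStep = (in.nearZ - in.farZ) / steps;   // sweep covers stepCount depth planes
    in.weightPerImg = 0.5f;

    // Device buffers (error checking omitted for brevity).
    cudaMalloc((void **)&in.transMat, 12 * sizeof(float));
    cudaMalloc((void **)&in.baseImg,  W * H * 4 * sizeof(float));
    cudaMalloc((void **)&in.cvData,   W * H * steps * sizeof(float));
    cudaMalloc((void **)&in.lowValue, W * H * sizeof(float));
    cudaMalloc((void **)&in.lowInd,   W * H * sizeof(float));
    cudaMemset(in.cvData, 0, W * H * steps * sizeof(float));

    // The kernel samples the reference image with tex2D<float4>, so bind a
    // float4 CUDA array to a texture object using unnormalized coordinates.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
    cudaArray_t refArray;
    cudaMallocArray(&refArray, &desc, W, H);

    cudaResourceDesc resDesc{};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = refArray;

    cudaTextureDesc texDesc{};
    texDesc.addressMode[0]   = cudaAddressModeClamp;
    texDesc.addressMode[1]   = cudaAddressModeClamp;
    texDesc.filterMode       = cudaFilterModeLinear;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;     // kernel passes pixel coordinates
    cudaCreateTextureObject(&in.refImg, &resDesc, &texDesc, nullptr);

    // Upload transMat, baseImg and the reference image here, then hand the
    // struct to the launcher defined above:
    // updataCVCaller(in);   // assumes cvInputSketch matches the real cvInput layout

    cudaDestroyTextureObject(in.refImg);
    cudaFreeArray(refArray);
    cudaFree(in.transMat); cudaFree(in.baseImg); cudaFree(in.cvData);
    cudaFree(in.lowValue); cudaFree(in.lowInd);
    return 0;
}
// --------------------------------------------------------------------------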
39088aeb8f86a1af35d5cea84efe24f1aa3e8dbd.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 15000
using namespace std;

__global__ void MatrVectMul(int *d_c, int *d_a, int *d_b)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i<N)
    {
        d_c[i]=0;
        for (int k=0;k<N;k++)
            d_c[i]+=d_a[i+k*N]*d_b[k];
    }
}
// Here: threadIdx.x is the thread index within a block along x,
// blockIdx.x is the block index within the grid along x,
// blockDim.x is the number of threads per block.
int main()
{
    hipEvent_t start, stop;
    float gpuTime=0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // ordinary arrays in host memory
    int *h_a,*h_b,*h_c;
    h_a = new int[N*N];
    h_b = new int[N];
    h_c = new int[N];
    for (int i=0;i<N;i++) // initialize arrays a and b
    {
        for (int k=0;k<N;k++)
        {
            h_a[i*N+k]=1;
        }
        h_b[i]=2;
    }
    // pointers to arrays in device memory
    int *d_a,*d_b,*d_c;
    // allocate device memory
    hipMalloc((void **)&d_a, sizeof(int)*N*N);
    hipMalloc((void **)&d_b, sizeof(int)*N);
    hipMalloc((void **)&d_c, sizeof(int)*N);
    // copy from host memory to device memory
    hipMemcpy(d_a, h_a, sizeof(int)*N*N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, sizeof(int)*N, hipMemcpyHostToDevice);
    // set the number of blocks
    dim3 grid((N+255)/256, 1, 1);
    // set the number of threads per block
    dim3 threads(256, 1, 1);
    // start timing
    hipEventRecord(start,0);
    hipEventSynchronize(start);
    // launch the kernel
    hipLaunchKernelGGL(( MatrVectMul) , dim3(grid), dim3(threads) , 0, 0, d_c, d_a, d_b);
    // kernel finished, stop the timer
    hipEventRecord(stop,0);
    // synchronize with the end of the computation
    hipEventSynchronize(stop);
    hipEventElapsedTime(&gpuTime,start,stop);
    printf("Time: %.9f msec.\n",gpuTime);
    // copy from device memory back to host memory
    hipMemcpy(h_c, d_c, sizeof(int)*N, hipMemcpyDeviceToHost);
    //for (int i=0;i<N;i++) cout<<h_c[i]<<' ';
    // free memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
39088aeb8f86a1af35d5cea84efe24f1aa3e8dbd.cu
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#define N 15000
using namespace std;

__global__ void MatrVectMul(int *d_c, int *d_a, int *d_b)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i<N)
    {
        d_c[i]=0;
        for (int k=0;k<N;k++)
            d_c[i]+=d_a[i+k*N]*d_b[k];
    }
}
// Here: threadIdx.x is the thread index within a block along x,
// blockIdx.x is the block index within the grid along x,
// blockDim.x is the number of threads per block.
int main()
{
    cudaEvent_t start, stop;
    float gpuTime=0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // ordinary arrays in host memory
    int *h_a,*h_b,*h_c;
    h_a = new int[N*N];
    h_b = new int[N];
    h_c = new int[N];
    for (int i=0;i<N;i++) // initialize arrays a and b
    {
        for (int k=0;k<N;k++)
        {
            h_a[i*N+k]=1;
        }
        h_b[i]=2;
    }
    // pointers to arrays in device memory
    int *d_a,*d_b,*d_c;
    // allocate device memory
    cudaMalloc((void **)&d_a, sizeof(int)*N*N);
    cudaMalloc((void **)&d_b, sizeof(int)*N);
    cudaMalloc((void **)&d_c, sizeof(int)*N);
    // copy from host memory to device memory
    cudaMemcpy(d_a, h_a, sizeof(int)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*N, cudaMemcpyHostToDevice);
    // set the number of blocks
    dim3 grid((N+255)/256, 1, 1);
    // set the number of threads per block
    dim3 threads(256, 1, 1);
    // start timing
    cudaEventRecord(start,0);
    cudaEventSynchronize(start);
    // launch the kernel
    MatrVectMul <<< grid, threads >>> (d_c, d_a, d_b);
    // kernel finished, stop the timer
    cudaEventRecord(stop,0);
    // synchronize with the end of the computation
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime,start,stop);
    printf("Time: %.9f msec.\n",gpuTime);
    // copy from device memory back to host memory
    cudaMemcpy(h_c, d_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
    //for (int i=0;i<N;i++) cout<<h_c[i]<<' ';
    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
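// --------------------------------------------------------------------------
// Illustrative correctness check (a sketch, not part of the original file).
// The program above fills A with 1s and b with 2s, so every entry of c = A*b
// is a sum of N products 1*2, i.e. 2*N. The lines below are meant to slot
// into main() right after the device-to-host copy of h_c, replacing the
// commented-out print loop.
bool ok = true;
for (int i = 0; i < N; i++) {
    if (h_c[i] != 2 * N) { ok = false; break; }
}
printf(ok ? "Result OK\n" : "Result mismatch\n");
// --------------------------------------------------------------------------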
1b31e4ecffcc2251d10dde1d6ad5a0c2ca3cf406.hip
// !!! This is a file automatically generated by hipify!!! #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/reduce.h> #include <loops/indexreduce.h> #include <loops/pairwise_transform.h> #include <loops/transform.h> #include <loops/scalar.h> #include <loops/broadcasting.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> //#include <thread> #include <map> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/grid_shaped.h> #include <loops/grid_strided.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include <ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <NDArrayFactory.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> // b40c only available for gcc :( #ifdef __clang__ // do nothing #elif __GNUC__ #include <b40c/util/error_utils.cuh> #include <b40c/util/multiple_buffering.cuh> #include <b40c/radix_sort/enactor.cuh> #endif #include <hiprand/hiprand.h> #include <Status.h> using namespace nd4j; hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){ SyncInfo *sync = (SyncInfo *) data; printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jIndex)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jIndex n,hipFuncAttributes attributes, hipDeviceProp_t properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, 
hipFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, hipFuncAttributes funcAttr) { int xRank = shape::rank(xShapeInfo); int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo); int zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); int xLength = shape::length(xShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param xShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { int tadLength = 0; int numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; if (tadLength == 1) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768); numTads = shape::length(xShapeInfo) / tadLength; } int xRank = shape::rank(xShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
* */ template <typename T> dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) { int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) { int *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<int> *scalarDimension; nd4j::buffer::Buffer<int> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { int *scalarDimensionBuff = (int *) malloc(sizeof(int)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } int *getShapeInfoHostPointer() { return scalarShapeInfo->data; } int * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } int * getDimensionHostPointer() { return scalarDimension->data; } int * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = (T*)malloc(sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ int *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the result pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ int *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D1 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = 
reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3); hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D2 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, 
double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *y, int yStride, double *result, int resultStride, double *extraParams, Nd4jIndex n) { dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes) { /* hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D5 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); */ } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams) { dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D7 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int 
*deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); if (opNum == 19) { execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D8 opNum:[%i]\n", opNum); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); if (opNum == 19) { execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); //checkCudaErrors(hipStreamSynchronize(*stream)); } /** * We have separate kernels, optimized for different number of dimensions for reductions */ if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ double NativeOps::execReduceScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams){ hipStream_t *stream = 
reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D9 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); //dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]); // for LogExpSum op we need to know max value, and store it if (opNum == 19) { double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ double NativeOps::execReduce3ScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo){ if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D11 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double 
*reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3ScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // since this method should return scalar value - we should block on this call checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D12 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int xStride, double *result, int resultStride, double scalar, double *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]); functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double 
scalar, double *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams, Nd4jIndex n, int *xIndexes, int *resultIndexes){ printf("Unsupported operation: scalarIndices\n"); /* } hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]); scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); */ } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execSummaryStatsScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo,bool 
biasCorrected) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D17 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8); // we have to limit grid size here, due to limited nature of reduction/allocation pointers launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8); // we're limiting maximum grid size for summaryStats ops launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *z, int zStride, double *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), 
OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D20 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; // special pointer for special buffer for special ops double *specialPointer = reinterpret_cast<double *>(extraPointers[6]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4); int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); /** * ops between 38 and 41 are special ops: * SoftMax, LogSoftMax, SoftMaxDerivative, IsMax * On cuda we execute them as */ // simple trick to get workaround over reductions into scalar if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block /* * For vector cases of everything, but IsMax (41) we go for single-kernel calls */ int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(256, length); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials // we'll do some pointers mangling here, and execute kernels one by one int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // TODO: we could get rid of this one eventually 
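/*
 * The block below implements SoftMax / LogSoftMax / SoftMaxDerivative by chaining
 * ops that already exist on the device: a max-reduce along the row dimension,
 * a broadcast-subtract of that max, an element-wise exp, a sum-reduce, a
 * broadcast-divide, and (for LogSoftMax / the derivative) one final transform.
 * The `special` buffer serves as scratch space for the per-row max and sum.
 * As a rough host-side sketch of the same numerically stable computation for a
 * single row (illustrative only, not part of this library):
 *
 *   void softmaxRow(const double *in, double *out, int n) {
 *       double mx = in[0];
 *       for (int i = 1; i < n; i++) if (in[i] > mx) mx = in[i];                  // max-reduce
 *       double sum = 0.0;
 *       for (int i = 0; i < n; i++) { out[i] = exp(in[i] - mx); sum += out[i]; } // subtract + exp + sum-reduce
 *       for (int i = 0; i < n; i++) out[i] /= sum;                               // broadcast-divide
 *   }
 */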
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); // exp 3 execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); // log 3 if (opNum == 40) execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams); else if (opNum == 39) execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams); checkCudaErrors(hipStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { /** * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call */ int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxDouble), dim3(1), dim3(128), 0, *stream , result, shape::length(hostXShapeInfo), targetIdx); } else { int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<double *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute filler hipLaunchKernelGGL(( fillDimensionalIsMaxDouble), dim3(blockLimit), dim3(64), funcAttributes[37].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets 
); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformDouble\n"); break; } } } } else { // for Im2Col & Col2Im we enforce higher dimensionality // TODO: investigate this on high-end gpus if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(double); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(double); } // Histogram op requires additional memory chunk // FIXME: make this one to use cache if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double)); } if (opNum == 71) { launchDims.z += 512 * sizeof(double); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release Histogram memory if (opNum == 48) { hipFree((void *)maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]); hipLaunchKernelGGL(( transformDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execIndexReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ if (nd4j::Environment::getInstance()->isDebug()) printf("F1 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = 
reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // once again - since we return scalar value in this method, we should block this kernel launch checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execIndexReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H1 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AH1 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking for scalar output checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4); if 
(nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF2 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x), dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execIndexReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH2 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ /* hipEvent_t start; hipEventCreateWithFlags(&start, hipEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch 
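/*
 * The DISPATCH_SIMPLE macro below selects the concrete broadcast kernel for opNum.
 * Semantically, a broadcast op combines every TAD of x element-wise with y along
 * the requested dimension. A minimal host-side sketch of broadcast-add over the
 * last dimension of a row-major matrix (illustrative only; the real kernels use
 * the TAD shape info and offsets passed in PARAMS to handle arbitrary shapes,
 * strides and orderings):
 *
 *   void broadcastAddRows(const float *x, const float *y, float *z, int rows, int cols) {
 *       for (int r = 0; r < rows; r++)
 *           for (int c = 0; c < cols; c++)
 *               z[r * cols + c] = x[r * cols + c] + y[c];
 *   }
 */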
DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) /* SyncInfo *info = new SyncInfo(); info->streamId = 32; info->callId = 1234567890; timespec ts1; timespec ts2; clock_gettime(CLOCK_REALTIME, &ts1); */ /* broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), y, yShapeInfo, shape::rank(hostYShapeInfo), result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ); */ /* clock_gettime(CLOCK_REALTIME, &ts2); // hipEventRecord(start, 0); // hipStreamAddCallback(*stream, syncCallback, (void*)info, 0); */ if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); /* clock_gettime(CLOCK_REALTIME, &tsY); printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec)); printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec)); printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec)); printf("-------------------------------------\n"); */ } void NativeOps::execBroadcastHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *y, int yStride, float *result, int resultStride, float *extraParams, Nd4jIndex n){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *y, int yStride, float16 *result, int resultStride, float16 *extraParams, Nd4jIndex n){ dim3 launchDims(512, 512, 2048); 
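/*
 * Note: in this file dim3 is consistently packed as (gridSize, blockSize, sharedMemBytes)
 * rather than a 3D block shape, so pairwise transforms get a fixed 512-block /
 * 512-thread / 2KB-shared configuration here. Conceptually, each thread walks the
 * strided buffers in a grid-stride loop, roughly like the sketch below (shown with
 * float and a hard-coded add for simplicity; illustrative only, not the actual kernel):
 *
 *   __global__ void pairwiseAddStrided(const float *x, Nd4jIndex xStride,
 *                                      const float *y, Nd4jIndex yStride,
 *                                      float *z, Nd4jIndex zStride, Nd4jIndex n) {
 *       for (Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
 *            i += (Nd4jIndex) gridDim.x * blockDim.x)
 *           z[i * zStride] = x[i * xStride] + y[i * yStride];
 *   }
 */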
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ /* hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF5 opNum:[%i]\n", opNum); pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); */ } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ /* hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH5 opNum:[%i]\n", opNum); pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); */ } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ 
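/*
 * The shaped pairwise variants below differ from the strided ones above: instead of
 * a single element stride per buffer they receive full shape info (rank, shape,
 * strides, ordering), so views and differently-ordered operands can be handled.
 * For reference, the linear offset of coordinates (i0, ..., ik) is sum_j i_j * stride_j;
 * e.g. for a 2D C-ordered array of shape [rows, cols] with strides [cols, 1]:
 *
 *   Nd4jIndex offset = r * cols + c;   // i.e. r * stride[0] + c * stride[1]
 *
 * The shape:: helpers generalize this to arbitrary rank and ordering.
 */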
void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F7 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF7 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H7 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH7 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * 
@param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension,int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1); if (opNum == 19) { execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); } // we call different kernels optimized for different number of dimensions in TAD if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension,int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H8 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1); if (nd4j::Environment::getInstance()->isVerbose() && 
launchDims.x == 1) printf("AH8 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); } // calling different kernels, depending on number of dimensions in TAD if (dimensionLength == 1) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ float NativeOps::execReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F9 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF9 opNum:[%i]\n", opNum); // for LogExpSum op we need to know max value, and store it if (opNum == 19) { float tmp = execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking this one checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H9 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH9 opNum:[%i]\n", opNum); // for LogExpSum op we need 
to know max value, and store it if (opNum == 19) { float tmp = execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF10 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH10 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, 
yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ float NativeOps::execReduce3ScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F11 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF11 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduce3ScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H11 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH11 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z + 2048, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension 
* @param dimensionLength */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF12 opNum:[%i]\n", opNum); if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarFloat << < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { reduce3Float << < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH12 opNum:[%i]\n", opNum); if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarHalf<< < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { 
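// Note on the dimension-wise branch below: one output element is produced per TAD
// (tensor-along-dimension) pair taken from x and y, and the kernel is deliberately launched
// with a single block. Conceptual host-side sketch only -- numTads, tadLength, op, update,
// postProcess and the offset/stride names are illustrative, not the kernel's actual symbols:
//
//   for (int tad = 0; tad < numTads; tad++) {              // one result element per TAD pair
//       float16 acc = startingValue;
//       for (int i = 0; i < tadLength; i++)                // walk matching elements of both TADs
//           acc = update(acc, op(x[xOffsets[tad] + i * xEws],
//                                y[yOffsets[tad] + i * yEws]));
//       result[tad] = postProcess(acc, tadLength);
//   }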
reduce3Half<< < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int xStride, float *result, int resultStride, float scalar, float *extraParams, Nd4jIndex n){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); // this macro builds bunch of IF/ELSE selectors for kernel launch functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int xStride, float16 *result, int resultStride, float scalar, float16 *extraParams, Nd4jIndex n){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) float16 sc = (float16) scalar; functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams){ int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); Nd4jIndex n = shape::length(hostXShapeInfo); // if (nd4j::Environment::getInstance()->isDebugAndVerbose()) // printf("F14 opNum:[%i]\n", opNum); //dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); //if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) // printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch 
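// For reference, DISPATCH_SIMPLE(name, type, PARAMS(...), OPS_A(...)) expands into a chain of
// opNum checks, each launching the kernel instantiation generated for that op. A rough,
// illustrative expansion only -- the generated kernel names and exact launch form are
// assumptions, not the literal macro output:
//
//   if (opNum == 0) {
//       hipLaunchKernelGGL((scalarSimpleShaped_0_float), dim3(launchDims.x), dim3(launchDims.y),
//                          launchDims.z, *stream,
//                          scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer);
//   } else if (opNum == 1) {
//       // ... and so on, one branch per op listed in OPS_A(SCALAR_OPS)
//   }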
//DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *result, int *resultShapeInfo, float scalarF, float16 *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); Nd4jIndex n = shape::length(hostXShapeInfo); //if (nd4j::Environment::getInstance()->isDebugAndVerbose()) // printf("H14 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); float16 scalar = (float16) scalarF; //if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) // printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams, int *xIndexes, int *resultIndexes){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF15 opNum:[%i]\n", opNum); /* scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); */ if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execSummaryStatsScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); 
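// Recap of the extraPointers[] convention, as read back via reinterpret_cast throughout these
// wrappers:
//   [0]  host X shape info          [1]  execution stream (hipStream_t)
//   [2]  device id                  [3]  device allocation/scratch pointer
//   [4]  reduction buffer           [5]  device scalar result pointer
//   [6]  "special ops" buffer       [7]  host Y shape info
//   [8]  host Z shape info          [9]  host TAD shape info
//   [10] device TAD shape info      [11] device TAD offsets
//   [12] device TAD shape info (y)  [13] device TAD offsets (y)
// Slots 15, 17 and 18 are additionally used by the dimension-based IsMax transform.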
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8); // we limit grid size for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } float NativeOps::execSummaryStatsScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8); // limiting number of blocks in grid, to match buffer memory size launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = 
reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } /** * * @param opNum * @param dx * @param xStride * @param result 
* @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *z, int zStride, float *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *z, int zStride, float16 *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops float *specialPointer = reinterpret_cast<float *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4); int *maskedAllocPointer = allocPointer; int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); 
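// The opNum 38..41 branch below (SoftMax, SoftMaxDerivative, LogSoftMax, IsMax) rewrites the
// non-vector cases as a pipeline of existing primitives, which also keeps SoftMax numerically
// stable. Per row of x:
//
//   m = max(x)                  -> execReduceFloat(...,  3, ...)   // row maxima
//   y = x - m                   -> execBroadcastFloat(..., 1, ...) // subtract
//   y = exp(y)                  -> execTransformFloat(..., 3, ...) // exponentiate
//   s = sum(y)                  -> execReduceFloat(...,  1, ...)   // row sums
//   y = y / s                   -> execBroadcastFloat(..., 3, ...) // normalize
//   y = log(y)  (LogSoftMax)    -> execTransformFloat(..., 5, ...)
//
// The per-row maxima/sums are staged in the temporary `special` buffer shaped {rows, 1}.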
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // that's special ops: SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // exp 3 execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if 
(nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastFloat(tempPointers, 3, result, resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams); else if (opNum == 39) execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams); checkCudaErrors(hipStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // if that's 1D input - we'll just go for single dim IMax op call + filler int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxFloat), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx); checkCudaErrors(hipStreamSynchronize(*stream)); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute hipLaunchKernelGGL(( fillDimensionalIsMaxFloat), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformFloat\n"); break; } } } } else { // we're enforcing larger grids for Col2Im & Im2Col // TODO: for high-end gpus we might use higher values here if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(float); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(float); } // histogram op requies additional memory chunk :( if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float)); } if (opNum == 71) { launchDims.z += 512 * sizeof(float); } DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, 
devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release memory chunk if (opNum == 48) { hipFree((void *) maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH20 opNum:[%i]\n", opNum); int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); // simple trick to get workaround over reductions into scalar // SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float16) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // FIXME: fix this 
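// Scratch layout for this branch, carved out of the "special ops" buffer at extraPointers[6]
// (see the pointer arithmetic on specialPointer above): one int for `dimension`, one int for
// `maxDimension`, then MAX_RANK * 2 + 4 ints for `maxShapeBuffer` (shape info of the {rows, 1}
// intermediate), followed by the `special` buffer holding the per-row max/sum values. The
// prepareShapeBuffer kernel launched next appears to fill dimension, maxDimension and
// maxShapeBuffer directly on the device, avoiding a host-to-device copy for these small
// descriptors.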
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // exp 3 execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (opNum == 40) { if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams); else if (opNum == 39) execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams); checkCudaErrors(hipStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // 1D input, aka vector int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxHalf), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> 
(extraPointers[15]); special = reinterpret_cast<float16 *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute hipLaunchKernelGGL(( fillDimensionalIsMaxHalf), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformHalf\n"); break; } } } } else { // Im2Col & Col2Im enforced grids if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(float16); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(float); } // Histogram op requires additional memory chunk if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(float16)); } if (opNum == 71) { launchDims.z += 512 * sizeof(float); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release that histogram memory chunk if (opNum == 48) { hipFree((void *)maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF21 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( transformFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t 
*>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH21 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( transformHalfIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } template <typename T> __device__ void flattenKernelGeneric(int dOffset, char order, T *result, int *resultShapeInfo, T *input, int *inputShapeInfo, int *allocationPointer) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int *zShape = shape::shapeOf(resultShapeInfo); int *zStride = shape::stride(resultShapeInfo); int *yShape = shape::shapeOf(inputShapeInfo); int *yStride = shape::stride(inputShapeInfo); char yOrder = shape::order(inputShapeInfo); int len = shape::length(inputShapeInfo); int resultEWS = shape::elementWiseStride(resultShapeInfo); int inputEWS = shape::elementWiseStride(inputShapeInfo); if (yOrder == order) { if (resultEWS >= 1 && inputEWS >= 1) { for (int i = tid; i < len; i+= gridDim.x * blockDim.x) { result[i * resultEWS + dOffset] = input[i * inputEWS]; } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } } } extern "C" __global__ void flattenKernelDouble(int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<double>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelFloat(int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelHalf(int offset, char order, float16 *result, int 
*resultShapeInfo, float16 *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float16>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenFloat( Nd4jPointer *extraPointers, int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF222 opNum:[7]\n"); hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::flattenHalf( Nd4jPointer *extraPointers, int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH222 opNum:[7]\n"); hipLaunchKernelGGL(( flattenKernelHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenDouble( Nd4jPointer *extraPointers, int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D30 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]); hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if 
(nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::checkP2P() {
    int curDevice = 0;
    hipGetDevice(&curDevice);

    int devCnt = 0;
    hipGetDeviceCount(&devCnt);

    if (curDevice < 0 || curDevice > devCnt)
        curDevice = 0;

    bool tempSupport = true;

    if (devCnt > 1) {
        for (int x = 0; x < devCnt; x++) {
            for (int y = 0; y < devCnt; y++) {
                if (x == y)
                    continue;

                int canAccess = 0;
                hipSetDevice(x);

                hipDeviceCanAccessPeer(&canAccess, x , y);

                if (!canAccess) {
                    tempSupport = false;
                    break;
                }
            }
        }

        supportedP2P = tempSupport;

        hipSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

void NativeOps::enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;
    hipGetDevice(&curDevice);

    int devCnt = 0;
    hipGetDeviceCount(&devCnt);

    if (curDevice < 0 || curDevice > devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int x = 0; x < devCnt; x++) {
            for (int y = 0; y < devCnt; y++) {
                if (x == y)
                    continue;

                int canAccess = 0;
                hipSetDevice(x);

                hipDeviceCanAccessPeer(&canAccess, x , y);

                if (canAccess) {
                    if (enable) {
                        hipDeviceEnablePeerAccess(y, 0);
                    } else {
                        hipDeviceDisablePeerAccess(y);
                    }
                } else {
                    if (nd4j::Environment::getInstance()->isVerbose())
                        printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
                }
            }
        }

        hipSetDevice(curDevice);
    }

    allowedP2P = enable;

    hipSetDevice(curDevice);
}

bool NativeOps::isP2PAvailable() {
    return supportedP2P;
}

void NativeOps::initializeDevicesAndFunctions() {
    int devCnt = 0;
    hipGetDeviceCount(&devCnt);
    deviceProperties = new hipDeviceProp_t[devCnt];
    for (int i = 0; i < devCnt; i++) {
        hipSetDevice(i);
        hipGetDeviceProperties(&deviceProperties[i], i);

        hipDeviceSetLimit(hipLimitStackSize, 4096);
    }

    hipSetDevice(0);

    checkP2P();

    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1)
        enableP2P(allowedP2P);

    hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);

    //void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
    // FIXME
    hipFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);

    //void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
    // FIXME
    hipFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);

    //hipFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat);

    //hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);

    // void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
    // hipFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);

    // void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
    // hipFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);

    hipFuncGetAttributes(&funcAttributes[7], reduce3Float);

    hipFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float);

    // printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);

    hipFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D

    // printf("reduceFloat1D regs: [%i], 
static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes); hipFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D // printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes); hipFuncGetAttributes(&funcAttributes[30], flattenKernelFloat); hipFuncGetAttributes(&funcAttributes[31], concatKernelFloat); // hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat); // hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex); // hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat); hipFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float); hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat); ///////////////////////////////////////// Doubles are separate, just in case of... hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes); // void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME hipFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes); //void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME hipFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes); //hipFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble); // hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes); //void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble; // hipFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes); //void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble; // hipFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes); hipFuncGetAttributes(&funcAttributes[21], reduce3Double); hipFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double); // hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble); // hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex); // hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble); hipFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double); hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble); hipFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D hipFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D hipFuncGetAttributes(&funcAttributes[34], flattenKernelDouble); hipFuncGetAttributes(&funcAttributes[35], concatKernelDouble); hipFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat); hipFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble); hipFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat); hipFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble); hipFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat); hipFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble); hipFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat); hipFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble); ///////////////////////// hipFuncGetAttributes(&funcAttributes[44], 
averagingKernelHalf); hipFuncGetAttributes(&funcAttributes[45], averagingKernelFloat); hipFuncGetAttributes(&funcAttributes[46], averagingKernelDouble); // //hipFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float); //hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16); //hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double); } void NativeOps::initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* this->hipblasSgemv = (CublasSgemv)functions[0]; this->hipblasDgemv = (CublasDgemv)functions[1]; this->hipblasHgemm = (CublasHgemm)functions[2]; this->hipblasSgemm = (CublasSgemm)functions[3]; this->hipblasDgemm = (CublasDgemm)functions[4]; this->cublasSgemmEx = (CublasSgemmEx)functions[5]; this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6]; this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7]; this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) { Nd4jPointer pointer; // hipHostMallocMapped |hipHostMallocPortable hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocDefault); if (res != 0) pointer = 0L; return pointer; } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) { Nd4jPointer pointer; hipError_t res = hipMalloc((void **)&pointer, memorySize); if (res != 0) pointer = 0L; return pointer; } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int NativeOps::freeHost(Nd4jPointer pointer) { hipError_t res = hipHostFree((void *) pointer); if (res != 0) pointer = 0L; return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. 
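 *
 * Illustrative usage sketch (not part of the original API docs): it assumes a
 * NativeOps instance `ops` and a `deviceIdPtr` encoded the same way the other
 * *Device methods in this file expect their ptrToDeviceId argument:
 *
 *   Nd4jPointer devBuf = ops.mallocDevice(1024, deviceIdPtr, 0);
 *   if (devBuf != 0L) {
 *       // ... fill via memsetAsync() / memcpyAsync() (flags == 1 is host -> device) ...
 *       ops.freeDevice(devBuf, deviceIdPtr);
 *   }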
 */
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
    hipError_t res = hipFree((void *)pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}

Nd4jPointer NativeOps::createContext() {
    return 0L;
}

Nd4jPointer NativeOps::createStream() {
    Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
    hipError_t result = hipStreamCreate((hipStream_t *) &nativeStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return nativeStream;
}

Nd4jPointer NativeOps::createEvent() {
    Nd4jPointer nativeEvent = (Nd4jPointer) malloc(sizeof(hipEvent_t));
    hipError_t result = hipEventCreateWithFlags((hipEvent_t *) &nativeEvent, hipEventDisableTiming);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return nativeEvent;
}

int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);

    hipError_t result = hipEventRecord(*pEvent, *pStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return 1;
}

int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
    int deviceId = getDeviceId(ptrToDeviceId);
    hipError_t result = hipSetDevice(deviceId);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return 1;
}

Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    hipGetDevice(&orig);

    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;
    hipMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }

    return (Nd4jIndex) memFree;
}

Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    hipGetDevice(&orig);

    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;
    hipMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }

    return (Nd4jIndex) memTotal;
}

int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
    hipMemcpyKind kind;

    if (nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(hipStreamSynchronize(*pStream));

    switch (flags) {
        case 0: {
            kind = hipMemcpyHostToHost;
        }
        break;
        case 1: {
            kind = hipMemcpyHostToDevice;
        }
        break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
        break; // device-to-host copies must not fall through into DeviceToDevice
        case 3: {
            kind = hipMemcpyDeviceToDevice;
        }
        break;
        default: {
            printf("UNDEFINED MEMCPY!\n");
            break;
        }
    }

    hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    if (result != 0) {
        checkCudaErrors(result);
        printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
        fflush(stdout);
        fflush(stderr);
        return 0L;
    }
    else return 1;
}

int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    hipError_t result = hipMemset((void *) dst, value, (size_t) size);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return 1;
}

int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
    hipError_t result = hipMemsetAsync((void *) dst, value,
(size_t) size, *pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t result = hipEventDestroy(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t result = hipStreamSynchronize(*pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t result = hipEventSynchronize(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatFloat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = shape::length(hostShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && 
dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackFloat), dim3(128), dim3(512), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); //smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); hipLaunchKernelGGL(( concatKernelFloat), dim3(2048), dim3(128), funcAttributes[31].sharedSizeBytes , *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::concatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = 
shape::length(hostShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); //smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); hipLaunchKernelGGL(( concatKernelHalf), dim3(2048), dim3(128), funcAttributes[31].sharedSizeBytes, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::specialConcatFloat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<float>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } void NativeOps::specialConcatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<float16>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::specialConcatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer 
*data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<double>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = shape::length(hostShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[39].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[41].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[43].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex 
*>(extraPointers[11]);

        hipLaunchKernelGGL(( concatKernelDouble), dim3(2048), dim3(128), funcAttributes[35].sharedSizeBytes, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);

    checkCudaErrors(hipStreamSynchronize(*stream));
}

/**
 * This method saves the TAD-only shape info and offsets computed for the given
 * dimensions into the caller-provided target and offsets buffers.
 */
void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, Nd4jIndex *offsets) {
    shape::TAD *tad = new shape::TAD();
    tad->init(xShapeInfo, dimension, dimensionLength);
    //tad->setOutputBuffer(target);
    tad->createTadOnlyShapeInfo();
    tad->createOffsets();

    std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
    std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(Nd4jIndex));

    /*
    shape::printShapeInfoLinear(hostXShapeInfo);
    shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
    shape::printShapeInfoLinear(target);
    */

    delete tad;
}

int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
    hipMemcpyKind kind;

    if (nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(hipStreamSynchronize(*pStream));

    switch (flags) {
        case 0: {
            kind = hipMemcpyHostToHost;
        }
        break;
        case 1: {
            kind = hipMemcpyHostToDevice;
        }
        break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
        break; // device-to-host copies must not fall through into DeviceToDevice
        case 3: {
            kind = hipMemcpyDeviceToDevice;
        }
        break;
    }

    //hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    hipError_t result = hipMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
    checkCudaErrors(result);
    if (result != 0) {
        printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags );
        return 0L;
    }
    else return 1;
}

Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    hipError_t result = hipGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);

    return dConstAddr;
}

void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    hipLaunchKernelGGL(( pullRowsKernelHalf), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);

    if (nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    hipLaunchKernelGGL(( pullRowsKernelFloat), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);

    if (nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n,
int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipLaunchKernelGGL(( pullRowsKernelDouble), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float16 **x = reinterpret_cast<float16 **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageHalf called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); hipLaunchKernelGGL(( averagingKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float **x = reinterpret_cast<float **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); hipLaunchKernelGGL(( averagingKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } else { // launching on host memory nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); double **x = reinterpret_cast<double **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageDouble called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); averagingKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float16 **x = reinterpret_cast<float16 **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateHalf called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); hipLaunchKernelGGL(( accumulateKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length); checkCudaErrors(hipStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length); } } void 
NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length) { hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float **x = reinterpret_cast<float **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); hipLaunchKernelGGL(( accumulateKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length); checkCudaErrors(hipStreamSynchronize(*stream)); } else { // launching on host memory nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length); } } void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); double **x = reinterpret_cast<double **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateDouble called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); accumulateKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length); checkCudaErrors(hipStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length); } } void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); double **x = reinterpret_cast<double **>(dx); double **z = reinterpret_cast<double **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelDouble), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); float **z = reinterpret_cast<float **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelFloat), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); float16 **z = 
reinterpret_cast<float16 **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelHalf), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ 
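    // Note: scalA / scalB above are the fp16 conversions consumed only by the commented-out
    // DISPATCH_METAOP path; the active GRIDStrided<float16> call below is handed the original
    // float scalarA / scalarB, matching this method's signature.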
functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) { // no-op hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); /* metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) { */ // metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // we have to converf float -> fp16 prior to kernel call float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, 
const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } bool NativeOps::isExperimentalEnabled() { return experimentalSupport; } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; hipGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void NativeOps::setTADThreshold(int num) { // this is no-op for CUDA } void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum, float *x, int *xShapeInfo, float *z, int *zShapeInfo, float *scalars, float *extraParams, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]); //dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0); dim3 launchDims = dim3(256, 256, 1024); // this macro builds bunch of IF/ELSE selectors for kernel launc h //DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *z, int *zShapeInfo, double *scalars, double *extraParams, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, float16 *scalars, float16 *extraParams, int *dimension, int 
dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); /* int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *tadOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *tadOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); */ // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum, float **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum, double **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, double *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum, float16 **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float16 *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void 
NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomFloat(Nd4jPointer 
*extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long 
bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // hipStreamSynchronize(*stream); unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); nd4j::random::RandomBuffer *buffer = new nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev); buffer->propagateToDevice(buffer, *stream); checkCudaErrors(hipStreamSynchronize(*stream)); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); checkCudaErrors(hipStreamSynchronize(*stream)); return buffer; } void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * * @param npyArray * @return */ Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) { /* cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray); int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer); return reinterpret_cast<Nd4jPointer>(shapeBuffer); */ cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int *shape = new unsigned int[arr.shape.size()]; for(int i = 0; i < arr.shape.size(); i++) { shape[i] = arr.shape[i]; } int *shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(), shape, arr.fortranOrder); delete[] shape; return reinterpret_cast<Nd4jPointer>(shapeBuffer); } /** * * @param npyArray * @return */ Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) { char *buff = reinterpret_cast<char *>(npyArray); //printf("Pointer contents %s\n",buff); cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); cnpy::NpyArray *arrPointer = &arr; char *data = arrPointer->data; if(arrPointer->wordSize == sizeof(float)) { float *floatData = reinterpret_cast<float *>(data); return reinterpret_cast<Nd4jPointer>(floatData); } else if(arrPointer->wordSize == 
sizeof(double)) { double *doubleData = reinterpret_cast<double *>(data); return reinterpret_cast<Nd4jPointer >(doubleData); } return reinterpret_cast<Nd4jPointer >(0); } /** * Load a numpy array from a file * and return it as an Nd4jPointer * @param path * @return */ Nd4jPointer NativeOps::numpyFromFile(std::string path) { /*cnpy::NpyArray arr = cnpy::npyLoad(path); return reinterpret_cast<Nd4jPointer >(&arr); */ char *numpyBuffer = cnpy::loadFile(path.data()); return reinterpret_cast<Nd4jPointer >(numpyBuffer); } void NativeOps::releaseNumpy(Nd4jPointer npyArray) { free((void *) npyArray); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) { int *shapeBuffer = reinterpret_cast<int *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * Get the element size for a numpy array * @param npyArray the numpy array's address * to get the length for * @return */ int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); cnpy::NpyArray *arrPointer = &arr; int size = arrPointer->wordSize; return size; /* cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray); return arr->wordSize; */ } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer NativeOps::pointerForAddress(Nd4jIndex address) { return reinterpret_cast<Nd4jPointer >(address); } void NativeOps::tearDouble(Nd4jPointer *extras, double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); hipLaunchKernelGGL(( tearKernelDouble), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::tearFloat(Nd4jPointer *extras, float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); hipLaunchKernelGGL(( tearKernelFloat), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); hipLaunchKernelGGL(( tearKernelHalf), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( encoderKernelP1Float), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); hipLaunchKernelGGL(( encoderKernelP1Double), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( encoderKernelP1Half), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jIndex N, int *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); // it prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jIndex N, int *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( encoderKernelP3Float), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jIndex N, int *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( encoderKernelP3Double), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jIndex N, int *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( encoderKernelP3Half), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( decoderKernelFloat), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); hipLaunchKernelGGL(( decoderKernelDouble), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); hipLaunchKernelGGL(( decoderKernelHalf), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xInfo, double *extraParamsVals, double *y, int *yInfo, double *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3AllDouble), dim3(launchDims.x), dim3(512), (512 * 8 * 2 + 512), *stream, opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers, int opNum, float *x, int *xInfo, float *extraParamsVals, float *y, int *yInfo, float *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF119 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3AllFloat), dim3(launchDims.x), dim3(512), (512 * 4 * 2 + 512), *stream, opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) 
checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers, int opNum, float16 *x, int *xInfo, float16 *extraParamsVals, float16 *y, int *yInfo, float16 *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH119 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3AllHalf), dim3(launchDims.x), dim3(512), (512 * 2 * 2 + 512), *stream, opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, bool descending) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); int xEWS = shape::elementWiseStride(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { hipLaunchKernelGGL(( cudaBitonicSortFloat), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending); } } } else { #ifdef __clang__ if (1 > 0) { #elif __GNUC__ if ((xLength > 1024 * 1024 * 10) && xEWS == 1) { b40c::radix_sort::Enactor enactor; b40c::util::DoubleBuffer<float> sort_storage(x); enactor.Sort(sort_storage, xLength); // fire reverse op if (descending) execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr); } else { #else if (1 > 0) { #endif int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; hipLaunchKernelGGL(( cudaSortFloat), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float), *stream, x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } } checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, bool descending) { hipStream_t *stream = 
reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); int xEWS = shape::elementWiseStride(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { hipLaunchKernelGGL(( cudaBitonicSortDouble), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending); } } } else { #ifdef __clang__ if (1 > 0) { #elif __GNUC__ if ((xLength > 1024 * 1024 * 10) && xEWS == 1) { b40c::radix_sort::Enactor enactor; b40c::util::DoubleBuffer<double> sort_storage(x); enactor.Sort(sort_storage, xLength); // fire reverse op if (descending) execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr); } else { #else if ( 1 > 0) { #endif int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; hipLaunchKernelGGL(( cudaSortDouble), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(double), *stream, x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } } checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, bool descending) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { hipLaunchKernelGGL(( cudaBitonicSortHalf), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending); } } } else { // half is incompatible with radix, so only bitonic here int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; hipLaunchKernelGGL(( cudaSortHalf), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float16), *stream, x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); 
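    // ------------------------------------------------------------------
    // Note added for clarity (hedged sketch, not part of the original sources):
    // the full-array sorts above pick one of three host-side strategies:
    //   1. power-of-two length       -> bitonic network (cudaBitonicSort*),
    //   2. very large length, EWS 1  -> b40c radix sort (gcc builds only),
    //   3. everything else           -> windowed merge passes (cudaSort*).
    // The bitonic driver is just two nested host loops over (k, j) passes,
    // each pass being one grid launch on the same stream, e.g.:
    //
    //   for (int k = 2; k <= xLength; k <<= 1)        // bitonic block size
    //       for (int j = k >> 1; j > 0; j >>= 1)      // compare distance
    //           hipLaunchKernelGGL((cudaBitonicSortFloat), dim3(numBlocks),
    //                              dim3(numThreads), 512, *stream,
    //                              x, xShapeInfo, j, k, xLength, descending);
    //
    // Ordering between passes is guaranteed by launching on a single stream.
    // The TAD variants below (sortTad*) instead launch one kernel that handles
    // the individual TADs, which is why they request the larger per-block
    // shared memory allocation (1088 * sizeof(T)).
    // ------------------------------------------------------------------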
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaSortTadFloat), dim3(512), dim3(512), 1088 * sizeof(float), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaSortTadHalf), dim3(512), dim3(512), 1088 * sizeof(float16), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaSortTadDouble), dim3(512), dim3(512), 1088 * sizeof(double), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, int *indices, float *values, Nd4jIndex length, int rank) { } void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, int *indices, double *values, Nd4jIndex length, int rank) { } void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, int *indices, float16 *values, Nd4jIndex length, int rank) { } Nd4jIndex NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); hipLaunchKernelGGL(( cudaEncodeBitmapFloat), dim3(512), dim3(512), 512 * 2 * sizeof(float) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } Nd4jIndex NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); hipLaunchKernelGGL(( cudaEncodeBitmapDouble), dim3(512), dim3(512), 512 * 2 * sizeof(double) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } Nd4jIndex NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jIndex N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = 
reinterpret_cast<int *>(extraPointers[3]); hipLaunchKernelGGL(( cudaEncodeBitmapHalf), dim3(512), dim3(512), (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(hipStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaDecodeBitmapFloat), dim3(512), dim3(512), 512 * sizeof(float) + 384, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaDecodeBitmapDouble), dim3(512), dim3(512), 512 * sizeof(double) + 384, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); hipLaunchKernelGGL(( cudaDecodeBitmapHalf), dim3(512), dim3(512), 512 * sizeof(float16) + 384, *stream, dx, N, dz); checkCudaErrors(hipStreamSynchronize(*stream)); } Nd4jIndex* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jIndex length) { return nullptr; } void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jIndex* ptrMap, Nd4jIndex length) { } Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } const char* NativeOps::getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } template<typename T> nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) { nd4j::graph::VariableSpace<T> varSpace; Context<T> block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = (int *) inputShapes[e]; auto buffer_ = (T *) inputBuffers[e]; auto array = new nd4j::NDArray<T>(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* 
NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } template<typename T> nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) { nd4j::graph::Context<T> block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back((int *) inputShapes[e]); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } template<typename T> static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using 
the same fake nodeId everywhere here std::vector<nd4j::NDArray<T>*> inputs(numInputs); std::vector<nd4j::NDArray<T>*> outputs; std::vector<T> ttArgs(numTArgs); std::vector<int> iiArgs(numIArgs); // filling block now for (int e = 0; e < numInputs; e++) { auto buffer = (T *) inputBuffers[e]; auto shape = (int *) inputShapes[e]; // auto var = new Variable<T>(new NDArray<T>(buffer, shape)); // block.getVariables()->emplace_back(var); auto array = new nd4j::NDArray<T>(buffer, shape); //array->setSpecialBuffers( (T *) inputBuffers[e + numInputs], (int *) inputShapes[e + numInputs]); inputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto result = op->execute(inputs, ttArgs, iiArgs, isInplace); if (result->status() != ND4J_STATUS_OK) return result->status(); if (!isInplace) { if (result->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape = (int *) outputShapes[e]; nd4j::NDArray <T> tmp(buffer, shape); tmp.assign(result->at(e)); } } delete result; for (auto ptr: inputs) delete ptr; return ND4J_STATUS_OK; } int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex 
graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } template <typename T> static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray<T> *> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray<T>((T *) inputBuffers[e], (int *) inputShapes[e]); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto result = nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet<T>(result); if (result == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jIndex graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { int *ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer); delete ptr; } void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) { deleteVariablesSetT<float>(pointer); } void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) { deleteVariablesSetT<float16>(pointer); } void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = 
reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphStateHalf(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<float16>(id); } Nd4jPointer NativeOps::getGraphStateFloat(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<float>(id); } Nd4jPointer NativeOps::getGraphStateDouble(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<double>(id); } void NativeOps::deleteGraphStateHalf(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<float16> *) state; delete stateP; } void NativeOps::deleteGraphStateFloat(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<float> *) state; delete stateP; } void NativeOps::deleteGraphStateDouble(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<double> *) state; delete stateP; } template <typename T> Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState<T> *state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are nd4j::graph::Node<T> node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = (T *) inputBuffers[e]; auto shapeInfo = (int *) inputShapes[e]; auto array = new nd4j::NDArray<T>(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto result = LogicExecutor<T>::processNode(graph, &node); if (result != Status::OK()) return result; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shapeInfo = (int *) outputShapes[e]; nd4j::NDArray<T> array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScopeHalf(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope<float16>(extraPointers, (nd4j::graph::GraphState<float16> *) state, opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } Nd4jStatus NativeOps::execCustomOpWithScopeFloat(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex 
opHash, Nd4jIndex *scopes, int numScopes,
                                                 Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs,
                                                 Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
    return execCustomOpWithScope<float>(extraPointers, (nd4j::graph::GraphState<float> *) state, opHash, scopes, numScopes,
                                        inputBuffers, inputShapes, numInputs,
                                        outputBuffers, outputShapes, numOutputs);
}

Nd4jStatus NativeOps::execCustomOpWithScopeDouble(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex opHash,
                                                  Nd4jIndex *scopes, int numScopes,
                                                  Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs,
                                                  Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
    return execCustomOpWithScope<double>(extraPointers, (nd4j::graph::GraphState<double> *) state, opHash, scopes, numScopes,
                                         inputBuffers, inputShapes, numInputs,
                                         outputBuffers, outputShapes, numOutputs);
}
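
// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original
// sources). It shows how a host caller might drive execCustomOpFloat() above
// for a single-input / single-output op with no t/i-arguments. The helper
// name and all buffer/shape-info pointers are hypothetical placeholders; real
// callers obtain the op hash from the op registrator and pass the usual
// device context through extraPointers.
static int runSingleIoCustomOpFloatSketch(NativeOps &ops, Nd4jIndex opHash,
                                          float *inputBuffer, int *inputShapeInfo,
                                          float *outputBuffer, int *outputShapeInfo) {
    Nd4jPointer inputBuffers[]  = { reinterpret_cast<Nd4jPointer>(inputBuffer) };
    Nd4jPointer inputShapes[]   = { reinterpret_cast<Nd4jPointer>(inputShapeInfo) };
    Nd4jPointer outputBuffers[] = { reinterpret_cast<Nd4jPointer>(outputBuffer) };
    Nd4jPointer outputShapes[]  = { reinterpret_cast<Nd4jPointer>(outputShapeInfo) };

    // one input, one output, no tArgs/iArgs, not in-place
    return ops.execCustomOpFloat(nullptr, opHash,
                                 inputBuffers, inputShapes, 1,
                                 outputBuffers, outputShapes, 1,
                                 nullptr, 0, nullptr, 0, false);
}
// ---------------------------------------------------------------------------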
1b31e4ecffcc2251d10dde1d6ad5a0c2ca3cf406.cu
#include "../NativeOps.h" #include <cuda.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/reduce.h> #include <loops/indexreduce.h> #include <loops/pairwise_transform.h> #include <loops/transform.h> #include <loops/scalar.h> #include <loops/broadcasting.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> //#include <thread> #include <map> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/grid_shaped.h> #include <loops/grid_strided.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include <ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <NDArrayFactory.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> // b40c only available for gcc :( #ifdef __clang__ // do nothing #elif __GNUC__ #include <b40c/util/error_utils.cuh> #include <b40c/util/multiple_buffering.cuh> #include <b40c/radix_sort/enactor.cuh> #endif #include <curand.h> #include <Status.h> using namespace nd4j; cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){ SyncInfo *sync = (SyncInfo *) data; printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jIndex)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jIndex n,cudaFuncAttributes attributes, cudaDeviceProp properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this 
later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. 
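 * (Worked example, added for clarity: on a cc 6.0 device the per-SM shared
 * memory figure used below is 65536 bytes, so the value returned is
 * 65536 / 0.3, roughly 218453; getBetterDimensions() then divides that budget
 * by the number of blocks expected to be resident per SM.)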
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, cudaFuncAttributes funcAttr) { int xRank = shape::rank(xShapeInfo); int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo); int zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); int xLength = shape::length(xShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param xShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { int tadLength = 0; int numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; if (tadLength == 1) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768); numTads = shape::length(xShapeInfo) / tadLength; } int xRank = shape::rank(xShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
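 * (Worked example, added for clarity and assuming the occupancy calculation
 * permits the full maxThreads = 512 block size: for a host shape describing
 * 10,000 elements, getOptimalDimensions() picks 512 threads, 10000 / 512 = 19
 * blocks, then adds one block for the 272-element remainder, so the launch
 * configuration comes back as roughly dim3(20, 512, 3000).)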
* */ template <typename T> dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) { int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) { int *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<int> *scalarDimension; nd4j::buffer::Buffer<int> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { int *scalarDimensionBuff = (int *) malloc(sizeof(int)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } int *getShapeInfoHostPointer() { return scalarShapeInfo->data; } int * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } int * getDimensionHostPointer() { return scalarDimension->data; } int * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = (T*)malloc(sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ int *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the result pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ int *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D1 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = 
reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3); indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D2 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, 
resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *y, int yStride, double *result, int resultStride, double *extraParams, Nd4jIndex n) { dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes) { /* cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D5 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); */ } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams) { dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D7 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int 
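// Note on the DISPATCH_SIMPLE(...) calls used throughout this file: the macro builds an
// if/else chain over opNum and launches the kernel specialization matching the selected op.
// A rough sketch of what a call like the broadcast dispatch above turns into -- this is an
// illustration of the dispatch pattern only, not the actual macro expansion:
//
//   if (opNum == 0) {
//       // launch the broadcast kernel instantiated for op 0 of BROADCAST_OPS
//       <op-0 kernel><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
//               x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo,
//               dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets,
//               deviceTADShapeInfoZ, deviceTADOffsetsZ);
//   } else if (opNum == 1) {
//       // ... one branch per entry in OPS_A(BROADCAST_OPS) ...
//   }
//
// PARAMS(...) packs the kernel arguments and OPS_A(...) supplies the op list to expand.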
*>(extraPointers[10]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); if (opNum == 19) { execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D8 opNum:[%i]\n", opNum); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); if (opNum == 19) { execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); //checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * We have separate kernels, optimized for different number of dimensions for reductions */ if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ double NativeOps::execReduceScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t 
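// A short note on the dimension-wise reduce above (inferred from how the TAD buffers are
// used here, not from the kernel sources): a TAD ("tensor along dimension") is the sub-array
// obtained by fixing all indices except those in `dimension`, and one output value is
// produced per TAD. For example, reducing a [4, 5] matrix along dimension 1 yields 4 TADs of
// length 5 and therefore 4 output values. The three DISPATCH_SIMPLE branches simply pick a
// kernel specialized for the TAD rank: a fast path for rank-1 TADs, one for TADs of rank <= 3,
// and a generic fallback (XD) for anything larger.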
*>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D9 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); //dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]); // for LogExpSum op we need to know max value, and store it if (opNum == 19) { double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ double NativeOps::execReduce3ScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo){ if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D11 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double 
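// Why the opNum == 19 branches above first run reduce op 3 (max, per the "max" reduce step in
// the softmax sequence further below): assuming op 19 is the usual log-sum-exp reduction that
// the "LogExpSum" comments refer to, the maximum is needed for a numerically stable
// evaluation:
//
//   log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)),  with m = max_i x_i
//
// Evaluating the right-hand side avoids overflow in exp() for large inputs.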
*>(extraPointers[4]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3ScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // since this method should return scalar value - we should block on this call checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D12 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int xStride, double *result, int resultStride, double scalar, double *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]); functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t 
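// For readability, the extraPointers slots used by the wrappers in this file, as inferred
// from the reinterpret_casts above and below (the layout itself is defined by the caller):
//   [0]  host X shape info             [1]  cuda stream
//   [2]  device id helper value        [3]  allocation pointer (device scratch)
//   [4]  reduction buffer              [5]  scalar result buffer (read back on host)
//   [6]  special device buffer         [7]  host Y shape info
//   [8]  host Z shape info             [9]  host TAD shape info
//   [10] device TAD shape info         [11] device TAD offsets
//   [12] device TAD shape info (Z/Y)   [13] device TAD offsets (Z/Y)
// Slots 14..18 are only touched by a few special ops (e.g. the IsMax path below).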
*>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams, Nd4jIndex n, int *xIndexes, int *resultIndexes){ printf("Unsupported operation: scalarIndices\n"); /* } cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]); scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); */ } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execSummaryStatsScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo,bool biasCorrected) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) 
printf("D17 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8); // we have to limit grid size here, due to limited nature of reduction/allocation pointers launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8); // we're limiting maximum grid size for summaryStats ops launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *z, int zStride, double *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) 
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

/**
 *
 * @param opNum
 * @param dx
 * @param xShapeInfo
 * @param result
 * @param resultShapeInfo
 * @param extraParams
 * @param n
 */
void NativeOps::execTransformDouble(
        Nd4jPointer *extraPointers,
        int opNum,
        double *dx,
        int *xShapeInfo,
        double *result,
        int *resultShapeInfo,
        double *extraParams){

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("D20 opNum:[%i]\n", opNum);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);

    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
    double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
    int *maskedAllocPointer = allocPointer;

    // special device buffer used by the "special ops" handled below
    double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);

    dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);

    int *dimension = (int *) specialPointer;
    int *maxDimension = dimension + 1;
    int *maxShapeBuffer = (int *) maxDimension + 1;
    double *special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4);

    int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
    Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);

    /**
     * ops between 38 and 41 are special ops:
     * SoftMax, LogSoftMax, SoftMaxDerivative, IsMax
     * On cuda we execute them as a sequence of simpler kernel calls rather than one
     * monolithic kernel: max reduce, broadcast subtract, exp transform, sum reduce and
     * broadcast divide, i.e. softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)),
     * with an extra log / derivative step for LogSoftMax / SoftMaxDerivative.
     * IsMax is executed as an IndexReduce followed by a filler kernel that marks the
     * winning position(s).
     */
    // simple trick to work around reductions into scalar
    if (opNum >= 38 && opNum <= 41) {
        if (shape::isVector(hostXShapeInfo) && opNum != 41) {
            // if that's a vector, we just go directly to the op in 1 block
            /*
             * For vector cases of everything but IsMax (41) we go for single-kernel calls
             */
            int length = shape::length(hostXShapeInfo);
            int block = nd4j::math::nd4j_min<int>(256, length);

            launchDims.x = 1;
            launchDims.y = block;
            launchDims.z += (block * sizeof(double) * 4);

            // this macro builds a bunch of IF/ELSE selectors for the kernel launch
            DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))

        } else {
            // going for blockwise specials: we'll do some pointer mangling here, and execute kernels one by one
            int *shape = shape::shapeOf(hostXShapeInfo);
            switch (opNum) {
                case 40: // LogSoftMax
                case 39: // SoftMax Derivative
                case 38: { // SoftMax
                    Nd4jPointer tempPointers[16];
                    tempPointers[0] = extraPointers[0];
                    tempPointers[1] = extraPointers[1];
                    tempPointers[2] = extraPointers[2];
                    tempPointers[3] = extraPointers[3];
                    tempPointers[4] = extraPointers[4];
                    tempPointers[5] = extraPointers[5];
                    tempPointers[6] = extraPointers[6];
                    tempPointers[7] = extraPointers[7];
                    tempPointers[8] = extraPointers[8];
                    tempPointers[10] = extraPointers[10];
                    tempPointers[11] = extraPointers[11];
                    tempPointers[12] = extraPointers[12];
                    tempPointers[13] = extraPointers[13];
                    tempPointers[14] = extraPointers[14];
                    tempPointers[15] = extraPointers[15];

                    int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
                    int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);

                    tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
                    tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;

                    // TODO: we could get rid of this one eventually
                    prepareShapeBuffer <<<1, 1, 128, *stream>>> (dimension, maxDimension, maxShapeBuffer, shape[0]);

                    if (nd4j::Environment::getInstance()->isDebug())
                        checkCudaErrors(cudaStreamSynchronize(*stream));

                    tempPointers[9] = extraPointers[12];
                    tempPointers[10] = extraPointers[13];
                    tempPointers[11] = extraPointers[14];

                    // max (reduce op 3)
                    execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1);

                    tempPointers[8] = extraPointers[8];
                    tempPointers[9] = extraPointers[9];
                    tempPointers[10] = extraPointers[10];
                    tempPointers[11] = extraPointers[11];
                    tempPointers[12] = extraPointers[10];
                    tempPointers[13] = extraPointers[11];

                    // subtract (broadcast op 1)
                    execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1);

                    // exp (transform op 3)
                    execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);

                    tempPointers[8] = tempPointers[7];
                    tempPointers[9] = extraPointers[12];
                    tempPointers[10] = extraPointers[13];
                    tempPointers[11] = extraPointers[14];

                    // sum (reduce op 1)
                    execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1);

                    tempPointers[8] = extraPointers[8];
                    tempPointers[9] = extraPointers[9];
                    tempPointers[10] = extraPointers[10];
                    tempPointers[11] = extraPointers[11];
                    tempPointers[12] = extraPointers[10];
                    tempPointers[13] = extraPointers[11];

                    // divide (broadcast op 3)
                    execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1);

                    // log for LogSoftMax, derivative for SoftMaxDerivative
                    if (opNum == 40)
                        execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
                    else if (opNum == 39)
                        execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);

                    checkCudaErrors(cudaStreamSynchronize(*stream));

                    delete[] hostMaxShapeBuffer;

                    break;
                }
                case 41: {
                    // IsMax along all dimensions
                    bool scalarCheat = false;
                    if (extraParams == nullptr) {
                        scalarCheat = true;
                    }

                    if (scalarCheat) {
                        /*
                         * In case of vector input for IsMax, it just turns into an IndexReduce call plus a further filler call
                         */
                        int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
                        int targetIdx = 0;

                        if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo))
                            targetIdx = maxIdx;
                        else
                            targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];

                        fillIsMaxDouble<<< 1, 128, 0, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
                    } else {
                        int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
                        Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
                        int *dimension = reinterpret_cast<int *> (extraPointers[15]);
                        special = reinterpret_cast<double *>(extraPointers[17]);
                        int dimensionLength = getDeviceId(extraPointers[18]);

                        // we call IMax along the specified dimension
                        execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);

                        if (nd4j::Environment::getInstance()->isDebug())
                            checkCudaErrors(cudaStreamSynchronize(*stream));

                        // at this point, all IMax indexes are gathered, and we execute the filler
                        fillDimensionalIsMaxDouble<<<blockLimit, 64, funcAttributes[37].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );

                        checkCudaErrors(cudaStreamSynchronize(*stream));
                    }
                    break;
                }
                default: {
                    printf("Bad case for transformDouble\n");
                    break;
                }
            }
        }
    } else {
        // for Im2Col &
Col2Im we enforce higher dimensionality // TODO: investigate this on high-end gpus if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(double); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(double); } // Histogram op requires additional memory chunk // FIXME: make this one to use cache if (opNum == 48) { int length = shape::length(hostZShapeInfo); cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double)); } if (opNum == 71) { launchDims.z += 512 * sizeof(double); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(cudaStreamSynchronize(*stream)); // release Histogram memory if (opNum == 48) { cudaFree((void *)maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]); transformDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execIndexReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ if (nd4j::Environment::getInstance()->isDebug()) printf("F1 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), 
hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); indexReduceFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // once again - since we return scalar value in this method, we should block this kernel launch checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execIndexReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H1 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8); if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1) printf("AH1 opNum:[%i]\n", opNum); indexReduceHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking for scalar output checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF2 opNum:[%i]\n", opNum); indexReduceFloat<<<launchDims.x, launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, 
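// The scalar-returning variants above all follow one pattern: launch the kernel with the
// scalar output directed to resultPointer (extraPointers[5]), synchronize the stream, then
// read resultPointer[0] directly on the host. The immediate host read only works if that
// buffer is host-accessible (e.g. pinned/mapped memory) -- that is an assumption about how
// the caller allocates extraPointers[5], not something enforced in this file.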
shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execIndexReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH2 opNum:[%i]\n", opNum); indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ /* cudaEvent_t start; cudaEventCreateWithFlags(&start, cudaEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) /* SyncInfo *info = new SyncInfo(); info->streamId = 32; info->callId = 1234567890; 
timespec ts1; timespec ts2; clock_gettime(CLOCK_REALTIME, &ts1); */ /* broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), y, yShapeInfo, shape::rank(hostYShapeInfo), result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ); */ /* clock_gettime(CLOCK_REALTIME, &ts2); // cudaEventRecord(start, 0); // cudaStreamAddCallback(*stream, syncCallback, (void*)info, 0); */ if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); /* clock_gettime(CLOCK_REALTIME, &tsY); printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec)); printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec)); printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec)); printf("-------------------------------------\n"); */ } void NativeOps::execBroadcastHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *y, int yStride, float *result, int resultStride, float *extraParams, Nd4jIndex n){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *y, int yStride, float16 *result, int resultStride, float16 *extraParams, Nd4jIndex n){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param 
xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ /* cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF5 opNum:[%i]\n", opNum); pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); */ } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ /* cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH5 opNum:[%i]\n", opNum); pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); */ } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, 
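// Note on the hard-coded dim3 launchDims(512, 512, 2048) used by the pairwise wrappers: in
// this file the dim3 is used as a plain triple (grid size, block size, shared-memory bytes),
// exactly as in the <<<launchDims.x, launchDims.y, launchDims.z, *stream>>> launches above,
// not as a 3D block shape. How PairWiseTransform<T>::execudaCudaShaped/Strided consumes it is
// assumed to follow the same convention.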
dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams){ dim3 launchDims(512, 512, 2048); functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F7 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF7 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H7 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH7 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension,int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = 
reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1); if (opNum == 19) { execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); } // we call different kernels optimized for different number of dimensions in TAD if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension,int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H8 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH8 opNum:[%i]\n", opNum); if (opNum == 19) { execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength); } // calling different kernels, depending on number of dimensions in TAD if (dimensionLength == 1) { // this macro builds 
bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ float NativeOps::execReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F9 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF9 opNum:[%i]\n", opNum); // for LogExpSum op we need to know max value, and store it if (opNum == 19) { float tmp = execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking this one checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H9 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH9 opNum:[%i]\n", opNum); // for LogExpSum op we need to know max value, and store it if (opNum == 19) { float tmp = execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams); extraParams = resultPointer; }; // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, 
extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF10 opNum:[%i]\n", opNum); reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH10 opNum:[%i]\n", opNum); reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ float NativeOps::execReduce3ScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int 
*xShapeInfo, float *extraParams, float *y, int *yShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F11 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF11 opNum:[%i]\n", opNum); reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduce3ScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H11 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH11 opNum:[%i]\n", opNum); reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z + 2048, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int 
*hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF12 opNum:[%i]\n", opNum); if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarFloat << < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { reduce3Float << < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH12 opNum:[%i]\n", opNum); if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarHalf<< < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { reduce3Half<< < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * 
@param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int xStride, float *result, int resultStride, float scalar, float *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); // this macro builds bunch of IF/ELSE selectors for kernel launch functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int xStride, float16 *result, int resultStride, float scalar, float16 *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) float16 sc = (float16) scalar; functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams){ int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); Nd4jIndex n = shape::length(hostXShapeInfo); // if (nd4j::Environment::getInstance()->isDebugAndVerbose()) // printf("F14 opNum:[%i]\n", opNum); //dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); //if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) // printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void 
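// Layout of the extraPointers array, as inferred from the reinterpret_casts used throughout these
// wrappers (a reference sketch, not an authoritative spec):
//   [0]  host X shape info          [1]  cudaStream_t handle
//   [2]  device id                  [3]  allocation pointer
//   [4]  reduction buffer           [5]  scalar result pointer
//   [6]  special device buffer      [7]  host Y shape info
//   [8]  host Z shape info          [9]  host TAD shape info
//   [10] device TAD shape info      [11] device TAD offsets
//   [12] device TAD shape info (Y)  [13] device TAD offsets (Y)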
NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *result, int *resultShapeInfo, float scalarF, float16 *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); Nd4jIndex n = shape::length(hostXShapeInfo); //if (nd4j::Environment::getInstance()->isDebugAndVerbose()) // printf("H14 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); float16 scalar = (float16) scalarF; //if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) // printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams, int *xIndexes, int *resultIndexes){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF15 opNum:[%i]\n", opNum); /* scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); */ if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execSummaryStatsScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8); // we limit grid size for 
SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } float NativeOps::execSummaryStatsScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8); // limiting number of blocks in grid, to match buffer memory size launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 
1, sizeof(float16), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *z, int zStride, float *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if 
(nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *z, int zStride, float16 *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops float *specialPointer = reinterpret_cast<float *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4); int *maskedAllocPointer = allocPointer; int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // that's special ops: SoftMax, SoftMaxDerivative, 
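    // The blockwise softmax path below is stitched together from the generic primitives in this file.
    // As a sketch, for every row x of the input it performs (op numbers taken from the calls below):
    //
    //   m   = max(x)                       execReduce*,    op 3
    //   y   = x - m                        execBroadcast*, op 1
    //   y   = exp(y)                       execTransform*, op 3
    //   s   = sum(y)                       execReduce*,    op 1
    //   out = y / s                        execBroadcast*, op 3
    //   out = log(out)  (LogSoftMax only)  execTransform*, op 5
    //
    // i.e. the numerically stable form softmax(x)_i = exp(x_i - m) / sum_j exp(x_j - m); the
    // SoftMaxDerivative variant (opNum 39) swaps the final step for transform op 42.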
LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // exp 3 execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastFloat(tempPointers, 3, result, 
resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams); else if (opNum == 39) execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams); checkCudaErrors(cudaStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // if that's 1D input - we'll just go for single dim IMax op call + filler int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; fillIsMaxFloat<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute fillDimensionalIsMaxFloat<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(cudaStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformFloat\n"); break; } } } } else { // we're enforcing larger grids for Col2Im & Im2Col // TODO: for high-end gpus we might use higher values here if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(float); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(float); } // histogram op requies additional memory chunk :( if (opNum == 48) { int length = shape::length(hostZShapeInfo); cudaMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float)); } if (opNum == 71) { launchDims.z += 512 * sizeof(float); } DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(cudaStreamSynchronize(*stream)); // release memory chunk if (opNum == 48) { cudaFree((void *) maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void 
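// Note on the temp-workspace pattern above (mirrored in the float16 variant that follows): the
// histogram op (opNum 48) gets a scratch buffer of length * launchDims.x elements via cudaMalloc,
// and the stream is synchronized unconditionally before cudaFree, so the buffer cannot be released
// while the still-running kernel may be writing into it.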
NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH20 opNum:[%i]\n", opNum); int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); // simple trick to get workaround over reductions into scalar // SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float16) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // FIXME: fix this prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, 
special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // exp 3 execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special, maxShapeBuffer, result, resultShapeInfo, dimension, 1); if (opNum == 40) { if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams); } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams); else if (opNum == 39) execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams); checkCudaErrors(cudaStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // 1D input, aka vector int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; fillIsMaxHalf<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float16 *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute 
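                // the IMax pass above leaves one argmax index per TAD in 'special'; the
                // fillDimensionalIsMax* kernel below expands those indexes into the output mask,
                // writing 1 at the winning position of each TAD and 0 everywhere else.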
fillDimensionalIsMaxHalf<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(cudaStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformHalf\n"); break; } } } } else { // Im2Col & Col2Im enforced grids if (opNum == 37 || opNum == 36 || opNum == 71) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 512 * sizeof(float16); } else if (opNum == 70) { // we'll be using shared memory to speed up reverse launchDims.z += launchDims.y * sizeof(float); } // Histogram op requires additional memory chunk if (opNum == 48) { int length = shape::length(hostZShapeInfo); cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(float16)); } if (opNum == 71) { launchDims.z += 512 * sizeof(float); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (nd4j::Environment::getInstance()->isDebug() || opNum == 48) checkCudaErrors(cudaStreamSynchronize(*stream)); // release that histogram memory chunk if (opNum == 48) { cudaFree((void *)maskedAllocPointer); } } if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF21 opNum:[%i]\n", opNum); transformFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) 
printf("AH21 opNum:[%i]\n", opNum); transformHalfIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } template <typename T> __device__ void flattenKernelGeneric(int dOffset, char order, T *result, int *resultShapeInfo, T *input, int *inputShapeInfo, int *allocationPointer) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int *zShape = shape::shapeOf(resultShapeInfo); int *zStride = shape::stride(resultShapeInfo); int *yShape = shape::shapeOf(inputShapeInfo); int *yStride = shape::stride(inputShapeInfo); char yOrder = shape::order(inputShapeInfo); int len = shape::length(inputShapeInfo); int resultEWS = shape::elementWiseStride(resultShapeInfo); int inputEWS = shape::elementWiseStride(inputShapeInfo); if (yOrder == order) { if (resultEWS >= 1 && inputEWS >= 1) { for (int i = tid; i < len; i+= gridDim.x * blockDim.x) { result[i * resultEWS + dOffset] = input[i * inputEWS]; } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } } } extern "C" __global__ void flattenKernelDouble(int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<double>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelFloat(int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelHalf(int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float16>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void 
NativeOps::flattenFloat( Nd4jPointer *extraPointers, int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF222 opNum:[7]\n"); flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::flattenHalf( Nd4jPointer *extraPointers, int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH222 opNum:[7]\n"); flattenKernelHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenDouble( Nd4jPointer *extraPointers, int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D30 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]); flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::checkP2P() { int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int x = 0; x < devCnt; x++) { for (int y = 0; y < devCnt; y++) { if (x == y) continue; int canAccess = 0; cudaSetDevice(x); cudaDeviceCanAccessPeer(&canAccess, x , y); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; cudaSetDevice(curDevice); } else { // if we have only 1 device - 
we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void NativeOps::enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int x = 0; x < devCnt; x++) { for (int y = 0; y < devCnt; y++) { if (x == y) continue; int canAccess = 0; cudaSetDevice(x); cudaDeviceCanAccessPeer(&canAccess, x , y); if (canAccess) { if (enable) { cudaDeviceEnablePeerAccess(y, 0); } else { cudaDeviceDisablePeerAccess(y); } } else { if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", x, y); } } } cudaSetDevice(curDevice); } allowedP2P = enable; cudaSetDevice(curDevice); } bool NativeOps::isP2PAvailable() { return supportedP2P; } void NativeOps::initializeDevicesAndFunctions() { int devCnt = 0; cudaGetDeviceCount(&devCnt); deviceProperties = new cudaDeviceProp[devCnt]; for (int i = 0; i < devCnt; i++) { cudaSetDevice(i); cudaGetDeviceProperties(&deviceProperties[i], i); cudaDeviceSetLimit(cudaLimitStackSize, 4096); } cudaSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes); //void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME cudaFuncGetAttributes(&funcAttributes[1], transformFloatIndexes); //void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME cudaFuncGetAttributes(&funcAttributes[2], transformFloatIndexes); //cudaFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat); //cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes); // void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat; // cudaFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes); // void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat; // cudaFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes); cudaFuncGetAttributes(&funcAttributes[7], reduce3Float); cudaFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float); // printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D // printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D // printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[30], flattenKernelFloat); cudaFuncGetAttributes(&funcAttributes[31], concatKernelFloat); // cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat); // cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex); // 
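// funcAttributes[] caches one cudaFuncAttributes record per kernel (register count, static shared
// memory, etc.), queried once at startup; the getBasicLaunchParams / getFlatLaunchParams /
// getReduceLaunchParams helpers receive these entries when choosing grid, block and shared-memory
// sizes for the corresponding launches.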
cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat); cudaFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float); cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat); ///////////////////////////////////////// Doubles are separate, just in case of... cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes); // void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME cudaFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes); //void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME cudaFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes); //cudaFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble); // cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes); //void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble; // cudaFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes); //void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble; // cudaFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes); cudaFuncGetAttributes(&funcAttributes[21], reduce3Double); cudaFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double); // cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble); // cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex); // cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble); cudaFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double); cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble); cudaFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D cudaFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D cudaFuncGetAttributes(&funcAttributes[34], flattenKernelDouble); cudaFuncGetAttributes(&funcAttributes[35], concatKernelDouble); cudaFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat); cudaFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble); cudaFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat); cudaFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble); cudaFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat); cudaFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble); cudaFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat); cudaFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble); ///////////////////////// cudaFuncGetAttributes(&funcAttributes[44], averagingKernelHalf); cudaFuncGetAttributes(&funcAttributes[45], averagingKernelFloat); cudaFuncGetAttributes(&funcAttributes[46], averagingKernelDouble); // //cudaFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float); //cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16); //cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double); } void NativeOps::initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* this->cublasSgemv = 
(CublasSgemv)functions[0];
    this->cublasDgemv = (CublasDgemv)functions[1];
    this->cublasHgemm = (CublasHgemm)functions[2];
    this->cublasSgemm = (CublasSgemm)functions[3];
    this->cublasDgemm = (CublasDgemm)functions[4];
    this->cublasSgemmEx = (CublasSgemmEx)functions[5];
    this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
    this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
    this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
    */
}

/**
 * This method acquires memory chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
    Nd4jPointer pointer;
    // cudaHostAllocMapped | cudaHostAllocPortable
    cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocDefault);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
    Nd4jPointer pointer;
    cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
int NativeOps::freeHost(Nd4jPointer pointer) {
    cudaError_t res = cudaFreeHost((void *) pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}
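// Hypothetical usage sketch for these allocation entry points (the 'ops' instance, buffer sizes and
// 'deviceIdPtr' are illustrative only); failures are reported through the 0L return convention:
//
//   Nd4jPointer host = ops->mallocHost(1024 * sizeof(float), 0);
//   Nd4jPointer dev  = ops->mallocDevice(1024 * sizeof(float), deviceIdPtr, 0);
//   if (host == 0L || dev == 0L) { /* allocation failed */ }
//   ...
//   ops->freeHost(host);
//   ops->freeDevice(dev, deviceIdPtr);     // freeDevice is defined just below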
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
    cudaError_t res = cudaFree((void *)pointer);
    if (res != 0) pointer = 0L;
    return 1L;
}

Nd4jPointer NativeOps::createContext() {
    return 0L;
}

Nd4jPointer NativeOps::createStream() {
    Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t));
    cudaError_t result = cudaStreamCreate((cudaStream_t *) &nativeStream);
    checkCudaErrors(result);
    if (result != 0) return 0L;
    else return nativeStream;
}

Nd4jPointer NativeOps::createEvent() {
    Nd4jPointer nativeEvent = (Nd4jPointer) malloc(sizeof(cudaEvent_t));
    cudaError_t result = cudaEventCreateWithFlags((cudaEvent_t *) &nativeEvent, cudaEventDisableTiming);
    checkCudaErrors(result);
    if (result != 0) return 0L;
    else return nativeEvent;
}

int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
    cudaError_t result = cudaEventRecord(*pEvent, *pStream);
    checkCudaErrors(result);
    if (result != 0) return 0L;
    else return 1;
}

int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
    int deviceId = getDeviceId(ptrToDeviceId);
    cudaError_t result = cudaSetDevice(deviceId);
    checkCudaErrors(result);
    if (result != 0) return 0L;
    else return 1;
}

Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    cudaGetDevice(&orig);
    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;
    cudaMemGetInfo(&memFree, &memTotal);
    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }
    return (Nd4jIndex) memFree;
}

Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    cudaGetDevice(&orig);
    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;
    cudaMemGetInfo(&memFree, &memTotal);
    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }
    return (Nd4jIndex) memTotal;
}

int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
    cudaMemcpyKind kind;
    if (nd4j::Environment::getInstance()->isDebug())
        checkCudaErrors(cudaStreamSynchronize(*pStream));
    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
            break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
            break;
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
            break;
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
            break;
        default: {
            printf("UNDEFINED MEMCPY!\n");
            break;
        }
    }
    cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    if (result != 0) {
        checkCudaErrors(result);
        printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
        fflush(stdout);
        fflush(stderr);
        return 0L;
    }
    else return 1;
}

int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaError_t result = cudaMemset((void *) dst, value, (size_t) size);
    checkCudaErrors(result);
    if (result != 0) return 0L;
    else return 1;
}

int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
    cudaError_t
result = cudaMemsetAsync((void *) dst, value, (size_t) size, *pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaError_t result = cudaEventDestroy(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream); cudaError_t result = cudaStreamSynchronize(*pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaError_t result = cudaEventSynchronize(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; cudaGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatFloat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = shape::length(hostShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { 
isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; concatKernelScalarFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; concatKernelVStackFloat<<< 128, 512, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; concatKernelHStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); //smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); concatKernelFloat<<< 2048, 128, funcAttributes[31].sharedSizeBytes , *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::concatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = shape::length(hostShapePointers[0]); isVstack = true; for (int i 
= 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; concatKernelScalarHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; concatKernelVStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; concatKernelHStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); //smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); concatKernelHalf<<< 2048, 128, funcAttributes[31].sharedSizeBytes, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::specialConcatFloat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<float>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } void NativeOps::specialConcatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<float16>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::specialConcatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<double>::concatCpuGeneric( 
dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') { Nd4jIndex length0 = shape::length(hostShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); smem = funcAttributes[39].sharedSizeBytes; concatKernelScalarDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); smem = funcAttributes[41].sharedSizeBytes; concatKernelVStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); smem = funcAttributes[43].sharedSizeBytes; concatKernelHStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); concatKernelDouble<<< 2048, 128, funcAttributes[35].sharedSizeBytes, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) 
offsetPointers[0], devZTadShape, devZOffsets); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * This method saves */ void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, Nd4jIndex *offsets) { shape::TAD *tad = new shape::TAD(); tad->init(xShapeInfo, dimension, dimensionLength); //tad->setOutputBuffer(target); tad->createTadOnlyShapeInfo(); tad->createOffsets(); std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int)); std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(Nd4jIndex)); /* shape::printShapeInfoLinear(hostXShapeInfo); shape::printShapeInfoLinear(tad->tadOnlyShapeInfo); shape::printShapeInfoLinear(target); */ delete tad; } int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved); cudaMemcpyKind kind; if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*pStream)); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } case 3: { kind = cudaMemcpyDeviceToDevice; } break; } //cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream); cudaError_t result = cudaMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream); checkCudaErrors(result); if (result != 0) { printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags ); return 0L; } else return 1; } Nd4jPointer NativeOps::getConstantSpace() { Nd4jPointer dConstAddr; cudaError_t result = cudaGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory); return dConstAddr; } void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); pullRowsKernelHalf<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); pullRowsKernelFloat<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); pullRowsKernelDouble<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if 
(nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float16 **x = reinterpret_cast<float16 **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageHalf called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); averagingKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float **x = reinterpret_cast<float **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { // launching on host memory nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); double **x = reinterpret_cast<double **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageDouble called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); averagingKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate); } } void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float16 **x = reinterpret_cast<float16 **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateHalf called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); accumulateKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length); } } void NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length) { cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); float **x = reinterpret_cast<float **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = 
getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); accumulateKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { // launching on host memory nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length); } } void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int mode = getDeviceId(extras[3]); double **x = reinterpret_cast<double **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateDouble called\n"); // launching on gpu if (mode == 0) { dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); accumulateKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length); } } void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); double **x = reinterpret_cast<double **>(dx); double **z = reinterpret_cast<double **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); shuffleKernelDouble<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); float **z = reinterpret_cast<float **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); shuffleKernelFloat<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); float16 **z = reinterpret_cast<float16 **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets); shuffleKernelHalf<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } 
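// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original API surface): how a caller
// might drive the averaging helpers above. The extras[] layout shown here is
// inferred from the reads performed by averageFloat()/accumulateFloat() in
// this file (stream handle at [1], device id at [2], execution mode at [3]);
// treat it as an assumption, not an authoritative specification. All names
// and sizes below are hypothetical.
// ---------------------------------------------------------------------------
#if 0
NativeOps ops;
int n = 4;                        // number of source arrays to average
Nd4jIndex length = 1024;          // elements per array
Nd4jPointer buffers[4];           // device pointers, e.g. obtained via mallocDevice()
float *dz = nullptr;              // device pointer receiving the averaged result
Nd4jPointer extras[4];
extras[0] = nullptr;              // host shape info (not read by averageFloat)
extras[1] = ops.createStream();   // cudaStream_t handle the kernel is enqueued on
extras[2] = 0;                    // device id used for launch-parameter lookup
extras[3] = 0;                    // mode 0: launch averagingKernelFloat on the GPU
ops.averageFloat(extras, buffers, dz, n, length, true);
#endif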
void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int 
*zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) { // no-op cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); /* metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) { */ // metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // we have to converf float -> fp16 prior to kernel call float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); /* if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, 
OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } */ functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } bool NativeOps::isExperimentalEnabled() { return experimentalSupport; } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; cudaGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void NativeOps::setTADThreshold(int num) { // this is no-op for CUDA } void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum, float *x, int *xShapeInfo, float *z, int *zShapeInfo, float *scalars, float *extraParams, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]); //dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0); dim3 launchDims = dim3(256, 256, 1024); // this macro builds bunch of IF/ELSE selectors for kernel launc h //DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *z, int *zShapeInfo, double *scalars, double *extraParams, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); // this macro builds bunch of IF/ELSE selectors for kernel launch //DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, float16 *scalars, float16 *extraParams, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); /* int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]); Nd4jIndex *tadOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]); int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); Nd4jIndex *tadOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]); */ // this macro builds bunch of IF/ELSE selectors for kernel launch 
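// (The DISPATCH_SIMPLE line kept below is the alternative, commented-out dispatch path;
// as in the float/double variants above, the actual launch goes through the templated
// ScalarTransform<float16>::executeCudaAlongDimension() call, and the TAD buffers named
// in the commented block above would come from extraPointers[10..13].)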
//DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum, float **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float *realArguments, int numRealArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum, double **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, double *realArguments, int numRealArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum, float16 **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float16 *realArguments, int numRealArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = 
dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } void 
NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // cudaStreamSynchronize(*stream); unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); nd4j::random::RandomBuffer *buffer = new 
nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev); buffer->propagateToDevice(buffer, *stream); checkCudaErrors(cudaStreamSynchronize(*stream)); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream); checkCudaErrors(cudaStreamSynchronize(*stream)); return buffer; } void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice cudaDeviceSynchronize(); delete buffer; } void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); cudaStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream); } void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); cudaStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * * @param npyArray * @return */ Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) { /* cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray); int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer); return reinterpret_cast<Nd4jPointer>(shapeBuffer); */ cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int *shape = new unsigned int[arr.shape.size()]; for(int i = 0; i < arr.shape.size(); i++) { shape[i] = arr.shape[i]; } int *shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(), shape, arr.fortranOrder); delete[] shape; return reinterpret_cast<Nd4jPointer>(shapeBuffer); } /** * * @param npyArray * @return */ Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) { char *buff = reinterpret_cast<char *>(npyArray); //printf("Pointer contents %s\n",buff); cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); cnpy::NpyArray *arrPointer = &arr; char *data = arrPointer->data; if(arrPointer->wordSize == sizeof(float)) { float *floatData = reinterpret_cast<float *>(data); return reinterpret_cast<Nd4jPointer>(floatData); } else if(arrPointer->wordSize == sizeof(double)) { double *doubleData = reinterpret_cast<double *>(data); return reinterpret_cast<Nd4jPointer >(doubleData); } return reinterpret_cast<Nd4jPointer >(0); } /** * Load a numpy array from a file * and return it as an Nd4jPointer * @param path * @return */ Nd4jPointer NativeOps::numpyFromFile(std::string path) { /*cnpy::NpyArray arr = cnpy::npyLoad(path); return reinterpret_cast<Nd4jPointer >(&arr); */ char *numpyBuffer 
= cnpy::loadFile(path.data()); return reinterpret_cast<Nd4jPointer >(numpyBuffer); } void NativeOps::releaseNumpy(Nd4jPointer npyArray) { free((void *) npyArray); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) { int *shapeBuffer = reinterpret_cast<int *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * Get the element size for a numpy array * @param npyArray the numpy array's address * to get the length for * @return */ int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); cnpy::NpyArray *arrPointer = &arr; int size = arrPointer->wordSize; return size; /* cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray); return arr->wordSize; */ } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer NativeOps::pointerForAddress(Nd4jIndex address) { return reinterpret_cast<Nd4jPointer >(address); } void NativeOps::tearDouble(Nd4jPointer *extras, double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); tearKernelDouble<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::tearFloat(Nd4jPointer *extras, float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); tearKernelFloat<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); tearKernelHalf<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); encoderKernelP1Float<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); encoderKernelP1Double<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); encoderKernelP1Half<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jIndex N, int *dz) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); // it prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jIndex N, int *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); encoderKernelP3Float<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jIndex N, int *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); encoderKernelP3Double<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jIndex N, int *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); encoderKernelP3Half<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); decoderKernelFloat<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); decoderKernelDouble<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); decoderKernelHalf<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xInfo, double *extraParamsVals, double *y, int *yInfo, double *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); reduce3AllDouble<<<launchDims.x, 512, (512 * 8 * 2 + 512), *stream>>>( opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers, int opNum, float *x, int *xInfo, float *extraParamsVals, float *y, int *yInfo, float *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF119 opNum:[%i]\n", opNum); reduce3AllFloat<<<launchDims.x, 512, (512 * 4 * 2 + 512), *stream>>>( opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers, int opNum, float16 *x, int *xInfo, float16 *extraParamsVals, float16 *y, int *yInfo, float16 *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *xTadShapeInfo, Nd4jIndex *xOffsets, int *yTadShapeInfo, Nd4jIndex *yOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = 
reinterpret_cast<int *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AH119 opNum:[%i]\n", opNum); reduce3AllHalf<<<launchDims.x, 512, (512 * 2 * 2 + 512), *stream>>>( opNum, x, xInfo, y, yInfo, extraParamsVals, result, resultShapeInfoBuffer, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, bool descending) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[ 1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); int xEWS = shape::elementWiseStride(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { cudaBitonicSortFloat<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending); } } } else { #ifdef __clang__ if (1 > 0) { #elif __GNUC__ if ((xLength > 1024 * 1024 * 10) && xEWS == 1) { b40c::radix_sort::Enactor enactor; b40c::util::DoubleBuffer<float> sort_storage(x); enactor.Sort(sort_storage, xLength); // fire reverse op if (descending) execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr); } else { #else if (1 > 0) { #endif int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; cudaSortFloat<<<numBlocks, numThreads, numThreads * 2 * sizeof(float), *stream>>>(x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } } checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, bool descending) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); int xEWS = shape::elementWiseStride(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { cudaBitonicSortDouble<<<numBlocks, numThreads, 512, *stream>>>(x, 
xShapeInfo, j, k, xLength, descending); } } } else { #ifdef __clang__ if (1 > 0) { #elif __GNUC__ if ((xLength > 1024 * 1024 * 10) && xEWS == 1) { b40c::radix_sort::Enactor enactor; b40c::util::DoubleBuffer<double> sort_storage(x); enactor.Sort(sort_storage, xLength); // fire reverse op if (descending) execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr); } else { #else if ( 1 > 0) { #endif int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; cudaSortDouble<<<numBlocks, numThreads, numThreads * 2 * sizeof(double), *stream>>>(x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } } checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, bool descending) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int xLength = shape::length(hostXShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { cudaBitonicSortHalf<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending); } } } else { // half is incompatible with radix, so only bitonic here int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; cudaSortHalf<<<numBlocks, numThreads, numThreads * 2 * sizeof(float16), *stream>>>(x, xShapeInfo, n, xLength, rev, descending); n>>=1; rev = 1; } while(n > 1); } } checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); cudaSortTadFloat<<<512, 512, 1088 * sizeof(float), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); cudaSortTadHalf<<<512, 512, 1088 * sizeof(float16), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(cudaStreamSynchronize(*stream)); } void 
NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) { // to be implemented cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); cudaSortTadDouble<<<512, 512, 1088 * sizeof(double), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, int *indices, float *values, Nd4jIndex length, int rank) { } void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, int *indices, double *values, Nd4jIndex length, int rank) { } void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, int *indices, float16 *values, Nd4jIndex length, int rank) { } Nd4jIndex NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); cudaEncodeBitmapFloat<<<512, 512, 512 * 2 * sizeof(float) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } Nd4jIndex NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); cudaEncodeBitmapDouble<<<512, 512, 512 * 2 * sizeof(double) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } Nd4jIndex NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jIndex N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); cudaEncodeBitmapHalf<<<512, 512, (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold); checkCudaErrors(cudaStreamSynchronize(*stream)); Nd4jIndex result = (Nd4jIndex) resultPointer[0]; resultPointer[0] = 0; return result; } void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); cudaDecodeBitmapFloat<<<512, 512, 512 * sizeof(float) + 384, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); 
cudaDecodeBitmapDouble<<<512, 512, 512 * sizeof(double) + 384, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); cudaDecodeBitmapHalf<<<512, 512, 512 * sizeof(float16) + 384, *stream>>>(dx, N, dz); checkCudaErrors(cudaStreamSynchronize(*stream)); } Nd4jIndex* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jIndex length) { return nullptr; } void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jIndex* ptrMap, Nd4jIndex length) { } Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } Nd4jPointer NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } const char* NativeOps::getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } template<typename T> nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) { nd4j::graph::VariableSpace<T> varSpace; Context<T> block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = (int *) inputShapes[e]; auto buffer_ = (T *) inputBuffers[e]; auto array = new nd4j::NDArray<T>(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, 
int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } template<typename T> nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) { nd4j::graph::Context<T> block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back((int *) inputShapes[e]); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } template<typename T> static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray<T>*> inputs(numInputs); std::vector<nd4j::NDArray<T>*> outputs; std::vector<T> ttArgs(numTArgs); std::vector<int> iiArgs(numIArgs); // filling block now for (int e = 0; e < numInputs; e++) { auto buffer = (T *) inputBuffers[e]; auto shape = (int *) inputShapes[e]; // auto var = new Variable<T>(new NDArray<T>(buffer, shape)); // block.getVariables()->emplace_back(var); auto array = new nd4j::NDArray<T>(buffer, shape); //array->setSpecialBuffers( (T *) inputBuffers[e + numInputs], (int *) inputShapes[e + numInputs]); inputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto result = op->execute(inputs, ttArgs, iiArgs, isInplace); if (result->status() != ND4J_STATUS_OK) return result->status(); if (!isInplace) { if (result->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape 
= (int *) outputShapes[e]; nd4j::NDArray <T> tmp(buffer, shape); tmp.assign(result->at(e)); } } delete result; for (auto ptr: inputs) delete ptr; return ND4J_STATUS_OK; } int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash); return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash); return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash); return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace); } int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } template <typename T> static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray<T> *> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray<T>((T *) inputBuffers[e], (int *) inputShapes[e]); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto result = 
nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet<T>(result); if (result == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jIndex graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { int *ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer); delete ptr; } void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) { deleteVariablesSetT<float>(pointer); } void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) { deleteVariablesSetT<float16>(pointer); } void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphStateHalf(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<float16>(id); } Nd4jPointer NativeOps::getGraphStateFloat(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<float>(id); } Nd4jPointer NativeOps::getGraphStateDouble(Nd4jIndex id) { return (Nd4jPointer) new nd4j::graph::GraphState<double>(id); } void NativeOps::deleteGraphStateHalf(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<float16> *) state; delete stateP; } void NativeOps::deleteGraphStateFloat(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<float> *) state; delete stateP; } void NativeOps::deleteGraphStateDouble(Nd4jPointer state) { auto stateP = (nd4j::graph::GraphState<double> *) state; delete stateP; } template <typename T> Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState<T> 
*state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are nd4j::graph::Node<T> node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = (T *) inputBuffers[e]; auto shapeInfo = (int *) inputShapes[e]; auto array = new nd4j::NDArray<T>(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto result = LogicExecutor<T>::processNode(graph, &node); if (result != Status::OK()) return result; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shapeInfo = (int *) outputShapes[e]; nd4j::NDArray<T> array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScopeHalf(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope<float16>(extraPointers, (nd4j::graph::GraphState<float16> *) state, opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } Nd4jStatus NativeOps::execCustomOpWithScopeFloat(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope<float>(extraPointers, (nd4j::graph::GraphState<float> *) state, opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } Nd4jStatus NativeOps::execCustomOpWithScopeDouble(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jIndex opHash, Nd4jIndex *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope<double>(extraPointers, (nd4j::graph::GraphState<double> *) state, opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); }
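The threshold encode/decode wrappers above all follow the same launch recipe: a fixed block size, a block count rounded up with N / blockSize + (N % blockSize ? 1 : 0), and a cudaStreamSynchronize on the caller-supplied stream before returning. The sketch below is a minimal, self-contained illustration of that pattern; scaleKernel is a hypothetical stand-in for kernels such as encoderKernelP1Float and is not part of NativeOps.

// Minimal sketch of the launch pattern used by the threshold wrappers above.
// scaleKernel is illustrative only; the real wrappers launch encoder/decoder kernels.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleKernel(float *dx, int N, float factor) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)                 // guard the partially filled tail block
        dx[i] *= factor;
}

int main() {
    const int N = 100000;
    float *dx = nullptr;
    cudaMalloc(&dx, N * sizeof(float));
    cudaMemset(dx, 0, N * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    int blockSize = 1024;                                      // fixed block size, as above
    int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);   // round up so every element is covered

    scaleKernel<<<numBlocks, blockSize, 0, stream>>>(dx, N, 2.0f);
    cudaStreamSynchronize(stream);                             // the wrappers synchronize before returning

    printf("launched %d blocks of %d threads for N = %d\n", numBlocks, blockSize, N);
    cudaStreamDestroy(stream);
    cudaFree(dx);
    return 0;
}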
c42332bc77545520f2c653a9b5b3257b054fa54b.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <ctime>
#include <hip/hip_runtime.h>
using namespace std;

const int N = 400;
const int P = 600;
const int M = 200;

float A[N][P];
float B[P][M];
float C[N][M];
float *A_D,*B_D,*C_D;

void Init_Data(){
    //Mat A
    for (int i = 0;i < N;++i){
        for (int j = 0;j < P;++j){
            A[i][j] = (j+1) * 1.f / (N * (i+1));
        }
    }
    //Mat B
    for (int i = 0;i < P ;++i){
        for (int j = 0;j < M;++j){
            B[i][j] = M * 1.0 / ((i+1) * (j+1));
        }
    }
    //Copy data to cuda
    hipMalloc((void **)&A_D, sizeof(float) * N * P);
    hipMalloc((void **)&B_D, sizeof(float) * P * M);
    hipMalloc((void **)&C_D, sizeof(float) * N * M);
    hipMemcpy(A_D, (void*)A, sizeof(float) * N * P, hipMemcpyHostToDevice);
    hipMemcpy(B_D, (void*)B, sizeof(float) * P * M, hipMemcpyHostToDevice);
}

__global__ void MatMul(float *A_D, float *B_D, float *C_D){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i >= N || j >= M)return;
    float v = 0;
    for (int k = 0;k < P;++k){
        v += A_D[i * P + k] * B_D[k * M + j];
    }
    C_D[i * M + j] = v;
}

void Output(){
    hipMemcpy((void*)C, C_D, sizeof(float) * N * M, hipMemcpyDeviceToHost);
    ofstream fout("cudaresult.txt");
    for (int i = 0;i < N;++i){
        for (int j = 0;j < M;++j){
            if (j != 0){
                fout << " ";
            }
            fout << C[i][j];
        }
        fout << endl;
    }
}

int main(){
    Init_Data();
    dim3 dimGrid(20, 20);
    //dim3 dimBlock(N,M);
    //dim3 dimBlock((N+31)/32*32,(M+31)/32*32);
    dim3 dimBlock(20,10);
    clock_t t = clock();
    hipLaunchKernelGGL(( MatMul), dim3(dimGrid), dim3(dimBlock), 0, 0, A_D, B_D, C_D);
    cout << "Cuda Used Time: "<< double((clock() - t)*1.0/CLOCKS_PER_SEC) << endl;
    Output();
    //Release Source
    hipFree(A_D);
    hipFree(B_D);
    hipFree(C_D);
    return 0;
}
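The launch in main() above hard-codes dimGrid(20, 20) and dimBlock(20, 10), which happens to give exactly 400 x 200 threads for N = 400 and M = 200. Because the kernel already guards with if (i >= N || j >= M) return;, the grid can instead be derived from N and M by ceiling division. Below is a small host-only sketch of that sizing; it is written against the CUDA runtime purely for illustration (the dim3 arithmetic is identical under HIP), and gridFor is a hypothetical helper, not part of the program above.

#include <cstdio>
#include <cuda_runtime.h>   // only needed for dim3 here

// Ceiling-division grid sizing for an N x M result, assuming the kernel maps
// the x dimension to rows (N) and the y dimension to columns (M), as above.
static dim3 gridFor(int rows, int cols, dim3 block) {
    return dim3((rows + block.x - 1) / block.x,
                (cols + block.y - 1) / block.y);
}

int main() {
    const int N = 400, M = 200;        // same sizes as the example above
    dim3 block(16, 16);                // 256 threads per block
    dim3 grid = gridFor(N, M, block);  // 25 x 13 blocks -> covers 400 x 208 threads
    printf("grid = (%u, %u), block = (%u, %u)\n", grid.x, grid.y, block.x, block.y);
    return 0;
}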
c42332bc77545520f2c653a9b5b3257b054fa54b.cu
#include <iostream>
#include <fstream>
#include <ctime>
#include <cuda_runtime.h>
using namespace std;

const int N = 400;
const int P = 600;
const int M = 200;

float A[N][P];
float B[P][M];
float C[N][M];
float *A_D,*B_D,*C_D;

void Init_Data(){
    //Mat A
    for (int i = 0;i < N;++i){
        for (int j = 0;j < P;++j){
            A[i][j] = (j+1) * 1.f / (N * (i+1));
        }
    }
    //Mat B
    for (int i = 0;i < P ;++i){
        for (int j = 0;j < M;++j){
            B[i][j] = M * 1.0 / ((i+1) * (j+1));
        }
    }
    //Copy data to cuda
    cudaMalloc((void **)&A_D, sizeof(float) * N * P);
    cudaMalloc((void **)&B_D, sizeof(float) * P * M);
    cudaMalloc((void **)&C_D, sizeof(float) * N * M);
    cudaMemcpy(A_D, (void*)A, sizeof(float) * N * P, cudaMemcpyHostToDevice);
    cudaMemcpy(B_D, (void*)B, sizeof(float) * P * M, cudaMemcpyHostToDevice);
}

__global__ void MatMul(float *A_D, float *B_D, float *C_D){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i >= N || j >= M)return;
    float v = 0;
    for (int k = 0;k < P;++k){
        v += A_D[i * P + k] * B_D[k * M + j];
    }
    C_D[i * M + j] = v;
}

void Output(){
    cudaMemcpy((void*)C, C_D, sizeof(float) * N * M, cudaMemcpyDeviceToHost);
    ofstream fout("cudaresult.txt");
    for (int i = 0;i < N;++i){
        for (int j = 0;j < M;++j){
            if (j != 0){
                fout << " ";
            }
            fout << C[i][j];
        }
        fout << endl;
    }
}

int main(){
    Init_Data();
    dim3 dimGrid(20, 20);
    //dim3 dimBlock(N,M);
    //dim3 dimBlock((N+31)/32*32,(M+31)/32*32);
    dim3 dimBlock(20,10);
    clock_t t = clock();
    MatMul<<<dimGrid, dimBlock>>>(A_D, B_D, C_D);
    cout << "Cuda Used Time: "<< double((clock() - t)*1.0/CLOCKS_PER_SEC) << endl;
    Output();
    //Release Source
    cudaFree(A_D);
    cudaFree(B_D);
    cudaFree(C_D);
    return 0;
}
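One caveat on the timing above: the kernel launch returns asynchronously, so reading clock() right after it mostly measures launch overhead; the kernel itself finishes later, during the synchronizing cudaMemcpy in Output(). A sketch of event-based timing, which brackets the work on the GPU, is shown below; dummyKernel is a placeholder, and in the program above the two events would bracket the MatMul launch instead.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel() {}   // placeholder for MatMul

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);            // recorded on the default stream
    dummyKernel<<<1, 1>>>();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);        // wait until the kernel has actually finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed GPU time in milliseconds
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}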
b08fbe7e3ff6a485a21e74dfe8053216c64e2d8e.hip
// !!! This is a file automatically generated by hipify!!!
// Example program for one-dimensional array addition
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   // for time(), used to seed srand()
#include <hip/hip_runtime.h>

#define DataSize 16

void GenerateNumbers(int *number, int size, int k) // randomly generate data
{
    int i;
    srand(k * time(NULL));
    for (i = 0; i < size; i++)
        number[i] = rand() % 100;
}

__global__ void Add_A(int *Da, int *Dc) // kernel function
{
    int tx = threadIdx.x; // x-axis id of the thread
    int bx = blockIdx.x;  // x-axis id of the block
    int bn = blockDim.x;  // number of threads along the block's x axis
    int id = bx*bn+tx;    // compute the array index
    if (id < DataSize/2)  // bounds guard: only DataSize/2 sums exist; without it threads with id >= DataSize/2 read past the end of Da
        Dc[id] = Da[id*2] + Da[id*2+1];
}

int main()
{
    int *Ha, *Hc; // CPU
    int size = DataSize * sizeof(int);
    Ha = (int*)malloc(size); // allocate array space
    //Hb = (int*)malloc(size); // allocate array space
    Hc = (int*)malloc(size); // allocate array space
    GenerateNumbers(Ha, DataSize, 2); // generate array data
    //GenerateNumbers(Hb, DataSize, 6); // generate array data
    /*
    dim3 is the three-dimensional vector type (X,Y,Z) provided by CUDA.
    CUDA limits each block to at most 1024 threads, (X*Y*Z)<=1024,
    and each grid to at most 65535 blocks, (X*Y)<=65535.
    The grid of blocks has at most 2 dimensions.
    */
    dim3 block(DataSize/2, 1, 1); // configure thread dimensions and size
    dim3 grid(2, 1, 1); // configure block dimensions and size
    int *Da, *Dc; // GPU
    hipMalloc((void**)&Da, size); // allocate GPU array space
    //hipMalloc((void**)&Db, size); // allocate GPU array space
    hipMalloc((void**)&Dc, size); // allocate GPU array space
    hipMemcpy(Da, Ha, size, hipMemcpyHostToDevice); // copy data to the GPU
    //hipMemcpy(Db, Hb, size, hipMemcpyHostToDevice); // copy data to the GPU
    hipLaunchKernelGGL(( Add_A) , dim3(grid), dim3(block) , 0, 0, Da, Dc); // launch the kernel
    hipDeviceSynchronize();
    hipMemcpy(Hc, Dc, size, hipMemcpyDeviceToHost); // copy the data (the results) back to the CPU
    int i;
    printf("A\n");
    for (i = 0; i < DataSize; i++)
        printf("%3d ", Ha[i]);
    //printf("\nB\n");
    //for (i = 0; i < DataSize; i++)
    //printf("%3d ", Hb[i]);
    printf("\nC\n");
    for (i = 0; i < DataSize/2; i++)
        printf("%3d ", Hc[i]);
    printf("\n");
    // free memory
    free(Ha);
    free(Hc);
    hipFree(Da);
    hipFree(Dc);
}
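Each of the DataSize/2 results above is just the sum of two adjacent inputs, so the output is cheap to verify on the host. The sketch below is a host-only check (plain C, no GPU API, so it applies equally to the HIP and CUDA versions); checkPairSums and the literal test values are illustrative and not part of the program above.

#include <stdio.h>

/* Host-side check for the pairwise-sum kernel above: out[i] must equal
 * in[2*i] + in[2*i+1] for i = 0 .. n/2 - 1.  Returns the number of mismatches. */
static int checkPairSums(const int *in, const int *out, int n) {
    int errors = 0;
    for (int i = 0; i < n / 2; i++)
        if (out[i] != in[2 * i] + in[2 * i + 1])
            errors++;
    return errors;
}

int main(void) {
    /* tiny stand-ins for Ha / Hc from the program above */
    int in[8]  = {1, 2, 3, 4, 5, 6, 7, 8};
    int out[4] = {3, 7, 11, 15};           /* what the kernel should produce */
    printf("mismatches: %d\n", checkPairSums(in, out, 8));
    return 0;
}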
b08fbe7e3ff6a485a21e74dfe8053216c64e2d8e.cu
// Example program for one-dimensional array addition
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   // for time(), used to seed srand()
#include <cuda.h>

#define DataSize 16

void GenerateNumbers(int *number, int size, int k) // randomly generate data
{
    int i;
    srand(k * time(NULL));
    for (i = 0; i < size; i++)
        number[i] = rand() % 100;
}

__global__ void Add_A(int *Da, int *Dc) // kernel function
{
    int tx = threadIdx.x; // x-axis id of the thread
    int bx = blockIdx.x;  // x-axis id of the block
    int bn = blockDim.x;  // number of threads along the block's x axis
    int id = bx*bn+tx;    // compute the array index
    if (id < DataSize/2)  // bounds guard: only DataSize/2 sums exist; without it threads with id >= DataSize/2 read past the end of Da
        Dc[id] = Da[id*2] + Da[id*2+1];
}

int main()
{
    int *Ha, *Hc; // CPU
    int size = DataSize * sizeof(int);
    Ha = (int*)malloc(size); // allocate array space
    //Hb = (int*)malloc(size); // allocate array space
    Hc = (int*)malloc(size); // allocate array space
    GenerateNumbers(Ha, DataSize, 2); // generate array data
    //GenerateNumbers(Hb, DataSize, 6); // generate array data
    /*
    dim3 is the three-dimensional vector type (X,Y,Z) provided by CUDA.
    CUDA limits each block to at most 1024 threads, (X*Y*Z)<=1024,
    and each grid to at most 65535 blocks, (X*Y)<=65535.
    The grid of blocks has at most 2 dimensions.
    */
    dim3 block(DataSize/2, 1, 1); // configure thread dimensions and size
    dim3 grid(2, 1, 1); // configure block dimensions and size
    int *Da, *Dc; // GPU
    cudaMalloc((void**)&Da, size); // allocate GPU array space
    //cudaMalloc((void**)&Db, size); // allocate GPU array space
    cudaMalloc((void**)&Dc, size); // allocate GPU array space
    cudaMemcpy(Da, Ha, size, cudaMemcpyHostToDevice); // copy data to the GPU
    //cudaMemcpy(Db, Hb, size, cudaMemcpyHostToDevice); // copy data to the GPU
    Add_A <<< grid, block >>> (Da, Dc); // launch the kernel
    cudaThreadSynchronize();
    cudaMemcpy(Hc, Dc, size, cudaMemcpyDeviceToHost); // copy the data (the results) back to the CPU
    int i;
    printf("A\n");
    for (i = 0; i < DataSize; i++)
        printf("%3d ", Ha[i]);
    //printf("\nB\n");
    //for (i = 0; i < DataSize; i++)
    //printf("%3d ", Hb[i]);
    printf("\nC\n");
    for (i = 0; i < DataSize/2; i++)
        printf("%3d ", Hc[i]);
    printf("\n");
    // free memory
    free(Ha);
    free(Hc);
    cudaFree(Da);
    cudaFree(Dc);
}
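The CUDA version above calls cudaThreadSynchronize(), which is deprecated; cudaDeviceSynchronize() is the current equivalent, and it is what hipify translated it to (hipDeviceSynchronize in the paired .hip file). The synchronization point is also the natural place to check for errors. A minimal sketch of launch-time and run-time error checking is shown below; emptyKernel is a stand-in for Add_A.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void emptyKernel() {}   // stand-in for Add_A

int main() {
    emptyKernel<<<2, 8>>>();       // same 2-block, 8-thread shape as above

    // cudaGetLastError() reports launch-configuration problems immediately;
    // cudaDeviceSynchronize() (the non-deprecated replacement for
    // cudaThreadSynchronize) waits for the kernel and reports execution errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("launch error: %s\n", cudaGetErrorString(err));

    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("execution error: %s\n", cudaGetErrorString(err));

    return 0;
}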
5f92a41d85013fe450dab7772b88f96456e8387d.hip
// !!! This is a file automatically generated by hipify!!! //*LB* // Copyright (c) 2010, University of Bonn, Institute for Computer Science VI // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the University of Bonn // nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //*LE* #include <iostream> #include <hip/hip_runtime.h> #include <cuv/tools/texture.h> #include <stdexcept> #include <cuv/tools/cuv_general.hpp> #include <cuv/basics/tensor.hpp> #include <cuv/image_ops/move.hpp> using namespace std; /** * @brief convert four rgb pixels to gray simultaenously */ uchar4 __host__ __device__ rgb_to_y(uchar4 pixel1, uchar4 pixel2, uchar4 pixel3, uchar4 pixel4) { return make_uchar4( 0.299f * pixel1.x + 0.587f * pixel1.y + 0.114f * pixel1.z, 0.299f * pixel2.x + 0.587f * pixel2.y + 0.114f * pixel2.z, 0.299f * pixel3.x + 0.587f * pixel3.y + 0.114f * pixel3.z, 0.299f * pixel4.x + 0.587f * pixel4.y + 0.114f * pixel4.z); } /** * @brief convert an rgb pixel to gray */ uchar4 __host__ __device__ rgb_to_y(uchar1 pixel1, uchar1 pixel2, uchar1 pixel3, uchar1 pixel4) { return make_uchar4(pixel1.x, pixel2.x, pixel3.x, pixel4.x); } /** * @brief bilinear interpolation of for 4 pixels simultaenously */ uchar4 __host__ __device__ interpolate(uchar4 pixel1, uchar4 pixel2, uchar4 pixel3, uchar4 pixel4, float xfrac, float yfrac) { return make_uchar4( (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.x + xfrac*pixel2.x) + yfrac * ((1.0f-xfrac)*pixel3.x + xfrac*pixel4.x), (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.y + xfrac*pixel2.y) + yfrac * ((1.0f-xfrac)*pixel3.y + xfrac*pixel4.y), (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.z + xfrac*pixel2.z) + yfrac * ((1.0f-xfrac)*pixel3.z + xfrac*pixel4.z), 0); } /** * @brief bilinear interpolation for single pixel */ unsigned char __host__ __device__ interpolate(unsigned char pixel1, unsigned char pixel2, unsigned char pixel3, unsigned char pixel4, float xfrac, float yfrac) { return ((1.0f-yfrac) * ((1.0f-xfrac)*pixel1 + xfrac*pixel2) + yfrac * ((1.0f-xfrac)*pixel3 + xfrac*pixel4)); } /** * @brief bilinear interpolation for single pixel */ uchar1 __host__ __device__ interpolate(uchar1 pixel1, uchar1 pixel2, uchar1 pixel3, uchar1 pixel4, 
float xfrac, float yfrac) { return make_uchar1( (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.x + xfrac*pixel2.x) + yfrac * ((1.0f-xfrac)*pixel3.x + xfrac*pixel4.x)); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar4 get_pixel(bool inrange, unsigned int index, uchar4 oorcolor, const uchar4* base) { return (inrange ? fetch_x<UseCache>(base, index) : oorcolor); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar1 get_pixel(bool inrange, unsigned int index, uchar1 oorcolor, const uchar1* base) { return (inrange ? fetch_x<UseCache>(base, index) : oorcolor); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar4 get_pixel(unsigned int index, const uchar4* base, uchar4 orgcolor, float dst) { uchar4 p = fetch_x<UseCache>(base, index); p.x = dst*orgcolor.x + (1.f-dst)*p.x; p.y = dst*orgcolor.y + (1.f-dst)*p.y; p.z = dst*orgcolor.z + (1.f-dst)*p.z; return p; } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar1 get_pixel(unsigned int index, const uchar1* base, uchar1 orgcolor, float dst) { /*return dst*fetch_x<UseCache>(base, index) + (1.f-dst)*orgcolor.x;*/ uchar1 p = fetch_x<UseCache>(base, index); p.x = dst*orgcolor.x + (1.f-dst)*p.x; return p; } /** * @brief set a pixel to gray */ __device__ void set_default_color(uchar4& p){ p = make_uchar4(128,128,128,0); } /** * @brief set a pixel to gray */ __device__ void set_default_color(uchar1& p){ p = make_uchar1(128); } /** * set the three input maps to the decorrelated color values. */ template <class T> void __host__ __device__ set_pca_maps (T* map1, T* map2, T* map3, unsigned int index, uchar4 pixel) { map1[index] = (T)((-0.5525f*pixel.x - 0.5719f*pixel.y - 0.6063f*pixel.z + 441.3285f) * 0.004531772f - 1.0f); map2[index] = (T)(( 0.7152f*pixel.x + 0.0483f*pixel.y - 0.6973f*pixel.z + 177.8115f) * 0.005369070f - 1.0f); map3[index] = (T)((-0.4281f*pixel.x + 0.8189f*pixel.y - 0.3823f*pixel.z + 206.6520f) * 0.004813808f - 1.0f); } /** * set the first input map to the gray value of the pixel */ template <class T> void __host__ __device__ set_pca_maps(T* map1, T* map2, T* map3, unsigned int index, uchar1 pixel) { /*map1[index] = (T)(pixel.x * 0.007843137f - 1.0f);*/ map1[index] = (T)(pixel.x); } /** * @brief kernel for moving images up and down * @note mostly shamelessly stolen from rafael uetz * * @param mapsize number of pixels in an output map * @param src source pixels * @param xshift how much to shift left/right * @param yshift how much to shift up/down * @param patwidth width and height of the target image * @param enlarge whether to scale up the image * */ template<bool UseCache, class dst_pixel, class pixel> __global__ void move_image_kernel(dst_pixel* dst, const pixel* src, char xshift, char yshift, unsigned int patwidth, unsigned char dst_num_maps, bool enlarge){ const int iw = blockDim.x; // Determine input map width // Get x- and y-position of the input maps represented by the current thread const int mapx = threadIdx.x; const int mapy = blockIdx.x; // Get x- and y-position of the input pattern for the current position on the input maps. // Set inrange to false if the calculated position is out of range. 
int patx, paty; float patxf = 0.0f, patyf = 0.0f; bool inrange; pixel default_color = fetch_x<UseCache>(src, patwidth*patwidth*blockIdx.y); if (enlarge) { // Calculate x- and y-position on the input pattern for this thread patxf = (float(mapx - xshift) / iw) * patwidth; patyf = (float(mapy - yshift) / iw) * patwidth; // Store rounded position patx = int(patxf); paty = int(patyf); // Calculate remainder (required for interpolation) patxf -= patx; patyf -= paty; // Determine if the map pixel represented by the current thread shows a pixel of the pattern // (inrange=true) or is filled with the default color (inrange=false) inrange = (mapx >= xshift) && (mapy >= yshift) && (mapx < iw+xshift) && (mapy < iw+yshift); if(!inrange){ char xn = max(1,min(patx,(int)patwidth-2)); char yn = max(1,min(paty,(int)patwidth-2)); default_color = get_pixel<UseCache>(patwidth*patwidth*blockIdx.y + patwidth*yn + xn,src, default_color,min(1.f,max(0.f,0.13f*(float)(abs(patx-xn)+abs(paty-yn))) )); } } else { // Determines at which x- and y-position of the map the pattern starts /*const int offset = iw/2 - patwidth/2;*/ const int offset = 0; // Calculate x- and y-position on the input pattern for this thread patx = mapx - offset - xshift; paty = mapy - offset - yshift; // Determine if the map pixel represented by the current thread shows a pixel of the pattern // (inrange=true) or is filled with the default color (inrange=false) inrange = (patx >= 0) && (patx < patwidth) && (paty >= 0) && (paty < patwidth); if(!inrange){ char xn = max(1,min(patx,(int)patwidth-2)); char yn = max(1,min(paty,(int)patwidth-2)); default_color = get_pixel<UseCache>(patwidth*patwidth*blockIdx.y + patwidth*yn + xn,src, default_color,min(1.f,max(0.f,0.13f*(float)(abs(patx-xn)+abs(paty-yn)) ))); } } // Get index of processed pattern in the current mini batch const unsigned int patidx = blockIdx.y; pixel pixel1, pixel2, pixel3, pixel4; pixel ipx; /*uchar4 graypx;*/ /*uchar4 grayipx;*/ // Fetch colors of four adjacent pixels from texture. // If out of range, use default color defined above. // 1 2 // 3 4 pixel1 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+0) + (patx+0), default_color,src); pixel2 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+0) + (patx+1), default_color,src); pixel3 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+1) + (patx+0), default_color,src); pixel4 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+1) + (patx+1), default_color,src); // Calculate gray values of each of the four pixels. 
// x y // z w /*graypx = rgb_to_y(pixel1, pixel2, pixel3, pixel4);*/ // Interpolate color and edges for current position from the four source pixels if enlargement is enabled if (enlarge) { /*const float gap = float(patwidth) / iw;*/ ipx = interpolate(pixel1, pixel2, pixel3, pixel4, patxf, patyf); /*grayipx.x = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, max(patxf-gap/2, 0.0f), max(patyf-gap/2, 0.0f));*/ /*grayipx.y = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, min(patxf+gap/2, 1.0f), max(patyf-gap/2, 0.0f));*/ /*grayipx.z = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, max(patxf-gap/2, 0.0f), min(patyf+gap/2, 1.0f));*/ /*grayipx.w = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, min(patxf+gap/2, 1.0f), min(patyf+gap/2, 1.0f));*/ } else { ipx = pixel1; /*grayipx = graypx;*/ } const unsigned int wholeimgsize = dst_num_maps*iw*iw; set_pca_maps(dst + wholeimgsize*patidx + 0*iw*iw , dst + wholeimgsize*patidx + 1*iw*iw , dst + wholeimgsize*patidx + 2*iw*iw, iw*mapy + mapx, ipx); } #define V(X) #X << "=" <<(X) << ", " namespace cuv { namespace image_move_impl { template<class __value_typeA, class __value_typeB> void image_move(tensor<__value_typeA,dev_memory_space,column_major>& dst, const tensor<__value_typeB,dev_memory_space,column_major>& src, const unsigned int& src_image_size, const unsigned int& dst_image_size, const unsigned int& src_num_maps, const char& xshift, const char& yshift){ cuvAssert(dst.shape().size()==2); cuvAssert(src.shape().size()==2); const unsigned char dst_num_maps = src_num_maps == 4 ? 3 : 1; cuvAssert(src.shape()[1] == dst.shape()[1]); cuvAssert(src.shape()[0] % (src_image_size*src_num_maps) == 0); cuvAssert(dst.shape()[0] % (dst_image_size*dst_num_maps) == 0); dim3 blockDim(dst_image_size); dim3 gridDim (dst_image_size,src.shape()[1]); static const bool UseCache = true; const bool enlarge = dst_image_size != src_image_size; if(src_num_maps == 4){ typedef uchar4 T; const T* src_ptr = reinterpret_cast<const T*>(src.ptr()); if(UseCache) bind_x(src_ptr, src.size()/src_num_maps); hipLaunchKernelGGL(( move_image_kernel<UseCache>), dim3(gridDim),dim3(blockDim), 0, 0, dst.ptr(),src_ptr,xshift,yshift,src_image_size,dst_num_maps,enlarge); if(UseCache) unbind_x(src_ptr); }else if(src_num_maps == 1){ typedef uchar1 T; const T* src_ptr = reinterpret_cast<const T*>(src.ptr()); if(UseCache) bind_x(src_ptr, src.size()); hipLaunchKernelGGL(( move_image_kernel<UseCache>), dim3(gridDim),dim3(blockDim), 0, 0, dst.ptr(),src_ptr,xshift,yshift,src_image_size,dst_num_maps,enlarge); if(UseCache) unbind_x(src_ptr); }else{ throw std::runtime_error("wrong image format: Need RGBA interleaved _or_ grayscale"); } cuvSafeCall(hipDeviceSynchronize()); } template<class __value_typeA, class __value_typeB> void image_move(tensor<__value_typeA,host_memory_space,column_major>& dst, const tensor<__value_typeB,host_memory_space,column_major>& src, const unsigned int& image_width, const unsigned int& image_height, const unsigned int& num_maps, const char& xshift, const char& yshift){ throw std::runtime_error("not implemented"); } }; template<class __value_typeA, class __value_typeB, class __memory_space_type, class __memory_layout_type> void image_move(tensor<__value_typeA,__memory_space_type,__memory_layout_type>& dst, const tensor<__value_typeB,__memory_space_type,__memory_layout_type>& src, const unsigned int& image_width, const unsigned int& image_height, const unsigned int& num_maps, const int& xshift, const int& yshift){ cuvAssert(dst.shape().size()==2); 
cuvAssert(src.shape().size()==2); image_move_impl::image_move(dst,src,image_width,image_height,num_maps,(char)xshift,(char)yshift); } #define INST(A,B) \ template \ void image_move(tensor<A,dev_memory_space,column_major>&,const tensor<B,dev_memory_space,column_major>&, const unsigned int&, const unsigned int&, const unsigned int&, const int&, const int&); \ template \ void image_move(tensor<A,host_memory_space,column_major>&,const tensor<B,host_memory_space,column_major>&, const unsigned int&, const unsigned int&, const unsigned int&, const int&, const int&); \ INST(float,unsigned char); INST(unsigned char,unsigned char); };
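The device-side interpolate() overloads above all apply the same bilinear weighting: the sample is a blend of a 2x2 pixel neighbourhood, weighted by xfrac and yfrac. Below is a small host-side restatement of that formula with one worked value; bilerp is an illustrative helper and not part of the cuv API.

#include <cstdio>

// Host-side restatement of the device interpolate() above.  Pixels are laid out
//   p1 p2
//   p3 p4
// and xfrac / yfrac in [0,1] give the sample position inside that 2x2 cell.
static float bilerp(float p1, float p2, float p3, float p4, float xfrac, float yfrac) {
    return (1.0f - yfrac) * ((1.0f - xfrac) * p1 + xfrac * p2)
         +         yfrac  * ((1.0f - xfrac) * p3 + xfrac * p4);
}

int main() {
    // Sampling exactly in the middle of the cell averages the four pixels:
    // (10 + 20 + 30 + 40) / 4 = 25.
    printf("%.1f\n", bilerp(10.0f, 20.0f, 30.0f, 40.0f, 0.5f, 0.5f));
    return 0;
}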
5f92a41d85013fe450dab7772b88f96456e8387d.cu
//*LB* // Copyright (c) 2010, University of Bonn, Institute for Computer Science VI // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the University of Bonn // nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //*LE* #include <iostream> #include <cuda.h> #include <cuv/tools/texture.h> #include <stdexcept> #include <cuv/tools/cuv_general.hpp> #include <cuv/basics/tensor.hpp> #include <cuv/image_ops/move.hpp> using namespace std; /** * @brief convert four rgb pixels to gray simultaenously */ uchar4 __host__ __device__ rgb_to_y(uchar4 pixel1, uchar4 pixel2, uchar4 pixel3, uchar4 pixel4) { return make_uchar4( 0.299f * pixel1.x + 0.587f * pixel1.y + 0.114f * pixel1.z, 0.299f * pixel2.x + 0.587f * pixel2.y + 0.114f * pixel2.z, 0.299f * pixel3.x + 0.587f * pixel3.y + 0.114f * pixel3.z, 0.299f * pixel4.x + 0.587f * pixel4.y + 0.114f * pixel4.z); } /** * @brief convert an rgb pixel to gray */ uchar4 __host__ __device__ rgb_to_y(uchar1 pixel1, uchar1 pixel2, uchar1 pixel3, uchar1 pixel4) { return make_uchar4(pixel1.x, pixel2.x, pixel3.x, pixel4.x); } /** * @brief bilinear interpolation of for 4 pixels simultaenously */ uchar4 __host__ __device__ interpolate(uchar4 pixel1, uchar4 pixel2, uchar4 pixel3, uchar4 pixel4, float xfrac, float yfrac) { return make_uchar4( (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.x + xfrac*pixel2.x) + yfrac * ((1.0f-xfrac)*pixel3.x + xfrac*pixel4.x), (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.y + xfrac*pixel2.y) + yfrac * ((1.0f-xfrac)*pixel3.y + xfrac*pixel4.y), (1.0f-yfrac) * ((1.0f-xfrac)*pixel1.z + xfrac*pixel2.z) + yfrac * ((1.0f-xfrac)*pixel3.z + xfrac*pixel4.z), 0); } /** * @brief bilinear interpolation for single pixel */ unsigned char __host__ __device__ interpolate(unsigned char pixel1, unsigned char pixel2, unsigned char pixel3, unsigned char pixel4, float xfrac, float yfrac) { return ((1.0f-yfrac) * ((1.0f-xfrac)*pixel1 + xfrac*pixel2) + yfrac * ((1.0f-xfrac)*pixel3 + xfrac*pixel4)); } /** * @brief bilinear interpolation for single pixel */ uchar1 __host__ __device__ interpolate(uchar1 pixel1, uchar1 pixel2, uchar1 pixel3, uchar1 pixel4, float xfrac, float yfrac) { return make_uchar1( (1.0f-yfrac) * 
((1.0f-xfrac)*pixel1.x + xfrac*pixel2.x) + yfrac * ((1.0f-xfrac)*pixel3.x + xfrac*pixel4.x)); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar4 get_pixel(bool inrange, unsigned int index, uchar4 oorcolor, const uchar4* base) { return (inrange ? fetch_x<UseCache>(base, index) : oorcolor); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar1 get_pixel(bool inrange, unsigned int index, uchar1 oorcolor, const uchar1* base) { return (inrange ? fetch_x<UseCache>(base, index) : oorcolor); } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar4 get_pixel(unsigned int index, const uchar4* base, uchar4 orgcolor, float dst) { uchar4 p = fetch_x<UseCache>(base, index); p.x = dst*orgcolor.x + (1.f-dst)*p.x; p.y = dst*orgcolor.y + (1.f-dst)*p.y; p.z = dst*orgcolor.z + (1.f-dst)*p.z; return p; } /** * @brief fetch a pixel using texture memory/global memory depending on template param */ template<bool UseCache> __device__ uchar1 get_pixel(unsigned int index, const uchar1* base, uchar1 orgcolor, float dst) { /*return dst*fetch_x<UseCache>(base, index) + (1.f-dst)*orgcolor.x;*/ uchar1 p = fetch_x<UseCache>(base, index); p.x = dst*orgcolor.x + (1.f-dst)*p.x; return p; } /** * @brief set a pixel to gray */ __device__ void set_default_color(uchar4& p){ p = make_uchar4(128,128,128,0); } /** * @brief set a pixel to gray */ __device__ void set_default_color(uchar1& p){ p = make_uchar1(128); } /** * set the three input maps to the decorrelated color values. */ template <class T> void __host__ __device__ set_pca_maps (T* map1, T* map2, T* map3, unsigned int index, uchar4 pixel) { map1[index] = (T)((-0.5525f*pixel.x - 0.5719f*pixel.y - 0.6063f*pixel.z + 441.3285f) * 0.004531772f - 1.0f); map2[index] = (T)(( 0.7152f*pixel.x + 0.0483f*pixel.y - 0.6973f*pixel.z + 177.8115f) * 0.005369070f - 1.0f); map3[index] = (T)((-0.4281f*pixel.x + 0.8189f*pixel.y - 0.3823f*pixel.z + 206.6520f) * 0.004813808f - 1.0f); } /** * set the first input map to the gray value of the pixel */ template <class T> void __host__ __device__ set_pca_maps(T* map1, T* map2, T* map3, unsigned int index, uchar1 pixel) { /*map1[index] = (T)(pixel.x * 0.007843137f - 1.0f);*/ map1[index] = (T)(pixel.x); } /** * @brief kernel for moving images up and down * @note mostly shamelessly stolen from rafael uetz * * @param mapsize number of pixels in an output map * @param src source pixels * @param xshift how much to shift left/right * @param yshift how much to shift up/down * @param patwidth width and height of the target image * @param enlarge whether to scale up the image * */ template<bool UseCache, class dst_pixel, class pixel> __global__ void move_image_kernel(dst_pixel* dst, const pixel* src, char xshift, char yshift, unsigned int patwidth, unsigned char dst_num_maps, bool enlarge){ const int iw = blockDim.x; // Determine input map width // Get x- and y-position of the input maps represented by the current thread const int mapx = threadIdx.x; const int mapy = blockIdx.x; // Get x- and y-position of the input pattern for the current position on the input maps. // Set inrange to false if the calculated position is out of range. 
int patx, paty; float patxf = 0.0f, patyf = 0.0f; bool inrange; pixel default_color = fetch_x<UseCache>(src, patwidth*patwidth*blockIdx.y); if (enlarge) { // Calculate x- and y-position on the input pattern for this thread patxf = (float(mapx - xshift) / iw) * patwidth; patyf = (float(mapy - yshift) / iw) * patwidth; // Store rounded position patx = int(patxf); paty = int(patyf); // Calculate remainder (required for interpolation) patxf -= patx; patyf -= paty; // Determine if the map pixel represented by the current thread shows a pixel of the pattern // (inrange=true) or is filled with the default color (inrange=false) inrange = (mapx >= xshift) && (mapy >= yshift) && (mapx < iw+xshift) && (mapy < iw+yshift); if(!inrange){ char xn = max(1,min(patx,(int)patwidth-2)); char yn = max(1,min(paty,(int)patwidth-2)); default_color = get_pixel<UseCache>(patwidth*patwidth*blockIdx.y + patwidth*yn + xn,src, default_color,min(1.f,max(0.f,0.13f*(float)(abs(patx-xn)+abs(paty-yn))) )); } } else { // Determines at which x- and y-position of the map the pattern starts /*const int offset = iw/2 - patwidth/2;*/ const int offset = 0; // Calculate x- and y-position on the input pattern for this thread patx = mapx - offset - xshift; paty = mapy - offset - yshift; // Determine if the map pixel represented by the current thread shows a pixel of the pattern // (inrange=true) or is filled with the default color (inrange=false) inrange = (patx >= 0) && (patx < patwidth) && (paty >= 0) && (paty < patwidth); if(!inrange){ char xn = max(1,min(patx,(int)patwidth-2)); char yn = max(1,min(paty,(int)patwidth-2)); default_color = get_pixel<UseCache>(patwidth*patwidth*blockIdx.y + patwidth*yn + xn,src, default_color,min(1.f,max(0.f,0.13f*(float)(abs(patx-xn)+abs(paty-yn)) ))); } } // Get index of processed pattern in the current mini batch const unsigned int patidx = blockIdx.y; pixel pixel1, pixel2, pixel3, pixel4; pixel ipx; /*uchar4 graypx;*/ /*uchar4 grayipx;*/ // Fetch colors of four adjacent pixels from texture. // If out of range, use default color defined above. // 1 2 // 3 4 pixel1 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+0) + (patx+0), default_color,src); pixel2 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+0) + (patx+1), default_color,src); pixel3 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+1) + (patx+0), default_color,src); pixel4 = get_pixel<UseCache>(inrange, patwidth*patwidth*patidx + patwidth*(paty+1) + (patx+1), default_color,src); // Calculate gray values of each of the four pixels. 
// x y // z w /*graypx = rgb_to_y(pixel1, pixel2, pixel3, pixel4);*/ // Interpolate color and edges for current position from the four source pixels if enlargement is enabled if (enlarge) { /*const float gap = float(patwidth) / iw;*/ ipx = interpolate(pixel1, pixel2, pixel3, pixel4, patxf, patyf); /*grayipx.x = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, max(patxf-gap/2, 0.0f), max(patyf-gap/2, 0.0f));*/ /*grayipx.y = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, min(patxf+gap/2, 1.0f), max(patyf-gap/2, 0.0f));*/ /*grayipx.z = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, max(patxf-gap/2, 0.0f), min(patyf+gap/2, 1.0f));*/ /*grayipx.w = interpolate(graypx.x, graypx.y, graypx.z, graypx.w, min(patxf+gap/2, 1.0f), min(patyf+gap/2, 1.0f));*/ } else { ipx = pixel1; /*grayipx = graypx;*/ } const unsigned int wholeimgsize = dst_num_maps*iw*iw; set_pca_maps(dst + wholeimgsize*patidx + 0*iw*iw , dst + wholeimgsize*patidx + 1*iw*iw , dst + wholeimgsize*patidx + 2*iw*iw, iw*mapy + mapx, ipx); } #define V(X) #X << "=" <<(X) << ", " namespace cuv { namespace image_move_impl { template<class __value_typeA, class __value_typeB> void image_move(tensor<__value_typeA,dev_memory_space,column_major>& dst, const tensor<__value_typeB,dev_memory_space,column_major>& src, const unsigned int& src_image_size, const unsigned int& dst_image_size, const unsigned int& src_num_maps, const char& xshift, const char& yshift){ cuvAssert(dst.shape().size()==2); cuvAssert(src.shape().size()==2); const unsigned char dst_num_maps = src_num_maps == 4 ? 3 : 1; cuvAssert(src.shape()[1] == dst.shape()[1]); cuvAssert(src.shape()[0] % (src_image_size*src_num_maps) == 0); cuvAssert(dst.shape()[0] % (dst_image_size*dst_num_maps) == 0); dim3 blockDim(dst_image_size); dim3 gridDim (dst_image_size,src.shape()[1]); static const bool UseCache = true; const bool enlarge = dst_image_size != src_image_size; if(src_num_maps == 4){ typedef uchar4 T; const T* src_ptr = reinterpret_cast<const T*>(src.ptr()); if(UseCache) bind_x(src_ptr, src.size()/src_num_maps); move_image_kernel<UseCache><<<gridDim,blockDim>>>(dst.ptr(),src_ptr,xshift,yshift,src_image_size,dst_num_maps,enlarge); if(UseCache) unbind_x(src_ptr); }else if(src_num_maps == 1){ typedef uchar1 T; const T* src_ptr = reinterpret_cast<const T*>(src.ptr()); if(UseCache) bind_x(src_ptr, src.size()); move_image_kernel<UseCache><<<gridDim,blockDim>>>(dst.ptr(),src_ptr,xshift,yshift,src_image_size,dst_num_maps,enlarge); if(UseCache) unbind_x(src_ptr); }else{ throw std::runtime_error("wrong image format: Need RGBA interleaved _or_ grayscale"); } cuvSafeCall(cudaThreadSynchronize()); } template<class __value_typeA, class __value_typeB> void image_move(tensor<__value_typeA,host_memory_space,column_major>& dst, const tensor<__value_typeB,host_memory_space,column_major>& src, const unsigned int& image_width, const unsigned int& image_height, const unsigned int& num_maps, const char& xshift, const char& yshift){ throw std::runtime_error("not implemented"); } }; template<class __value_typeA, class __value_typeB, class __memory_space_type, class __memory_layout_type> void image_move(tensor<__value_typeA,__memory_space_type,__memory_layout_type>& dst, const tensor<__value_typeB,__memory_space_type,__memory_layout_type>& src, const unsigned int& image_width, const unsigned int& image_height, const unsigned int& num_maps, const int& xshift, const int& yshift){ cuvAssert(dst.shape().size()==2); cuvAssert(src.shape().size()==2); 
image_move_impl::image_move(dst,src,image_width,image_height,num_maps,(char)xshift,(char)yshift); } #define INST(A,B) \ template \ void image_move(tensor<A,dev_memory_space,column_major>&,const tensor<B,dev_memory_space,column_major>&, const unsigned int&, const unsigned int&, const unsigned int&, const int&, const int&); \ template \ void image_move(tensor<A,host_memory_space,column_major>&,const tensor<B,host_memory_space,column_major>&, const unsigned int&, const unsigned int&, const unsigned int&, const int&, const int&); \ INST(float,unsigned char); INST(unsigned char,unsigned char); };
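// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): how the public
// image_move() entry point above expects its tensors to be laid out, following
// the cuvAsserts in image_move_impl::image_move(). The cuv::extents-based
// constructors and all literal sizes are assumptions for illustration; only
// the image_move() signature and the shape rules come from this file. Note
// that the first size argument is the (square) source image edge length and
// the second is the destination map edge length.
// ---------------------------------------------------------------------------
#include <cuv/basics/tensor.hpp>
#include <cuv/image_ops/move.hpp>

void image_move_usage_sketch()
{
    using namespace cuv;
    const unsigned int src_size = 64;   // source images are src_size x src_size
    const unsigned int dst_size = 64;   // output maps are dst_size x dst_size
    const unsigned int num_maps = 4;    // RGBA interleaved input -> 3 PCA maps out
    const unsigned int batch    = 16;   // one image per column

    // src rows: src_size*src_size*num_maps interleaved bytes per image
    tensor<unsigned char, dev_memory_space, column_major>
        src(extents[src_size * src_size * num_maps][batch]);
    // dst rows: dst_size*dst_size*3 floats per image (the three PCA maps)
    tensor<float, dev_memory_space, column_major>
        dst(extents[dst_size * dst_size * 3][batch]);

    // Shift every image two pixels right and one pixel down; the shifts are
    // cast to char inside the wrapper, so they must stay small.
    image_move(dst, src, src_size, dst_size, num_maps, /*xshift=*/2, /*yshift=*/1);
}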
32e7e2720bf383e7601696c2914041096321a4c4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Threshold-based Contrasting GPU implementation
//

#include <unistd.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 128

// CUDA kernel declaration
__global__ void cuda_contrast_kernel(unsigned char *u8data, unsigned char *u8res,
                                     unsigned int N, unsigned int thresh, unsigned int delta);

// C/C++ Wrapper
unsigned char *gpu_contrastu8(unsigned char *u8data, unsigned char *u8res,
                              unsigned int N, unsigned int thresh, unsigned int delta)
{
    // Host memory
    unsigned char *host_u8res;
    // Device memory
    unsigned char *dev_u8data;
    unsigned char *dev_u8res;

    size_t size = N * sizeof(unsigned char);

    host_u8res = (unsigned char *)malloc(size);

    // Allocate device memory
    hipMalloc((void **)&dev_u8data, size);
    hipMalloc((void **)&dev_u8res, size);

    // Upload data to device memory
    hipMemcpy(dev_u8data, u8data, size, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(cuda_contrast_kernel,
                       dim3((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK),
                       dim3(THREADS_PER_BLOCK), 0, 0,
                       dev_u8data, dev_u8res, N, thresh, delta);

    hipMemcpy(host_u8res, dev_u8res, size, hipMemcpyDeviceToHost);

    hipFree(dev_u8data);
    hipFree(dev_u8res);

    return host_u8res;
}

// CUDA kernel
__global__ void cuda_contrast_kernel(unsigned char *u8data, unsigned char *u8res,
                                     unsigned int N, unsigned int thresh, unsigned int delta)
{
    int idx = threadIdx.x + (blockDim.x * blockIdx.x);

    if (idx < N) {
        if (u8data[idx] >= thresh) {
            // Brighten, clamping at 255.
            if (u8data[idx] + delta >= 255) {
                u8res[idx] = 255;
            } else {
                u8res[idx] = u8data[idx] + delta;
            }
        } else {
            // Darken, clamping at 0. Compare instead of subtracting first:
            // u8data[idx] - delta is evaluated in unsigned arithmetic, so the
            // original "<= 0" test never caught an underflow.
            if (u8data[idx] <= delta) {
                u8res[idx] = 0;
            } else {
                u8res[idx] = u8data[idx] - delta;
            }
        }
    }
}
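// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the generated file): a minimal host-side
// driver for gpu_contrastu8() above, with a CPU reference for comparison.
// The driver itself (main, sizes, threshold/delta values) is hypothetical;
// only the gpu_contrastu8() signature comes from this file, and the sketch
// assumes it is compiled and linked together with the file above (e.g. via
// hipcc). Note that the wrapper ignores its u8res argument and returns a
// freshly malloc'd buffer, which the caller must free().
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>

unsigned char *gpu_contrastu8(unsigned char *u8data, unsigned char *u8res,
                              unsigned int N, unsigned int thresh, unsigned int delta);

int main(void)
{
    const unsigned int N = 1024, thresh = 128, delta = 40;
    unsigned char *in = (unsigned char *)malloc(N);
    for (unsigned int i = 0; i < N; ++i) in[i] = (unsigned char)(i % 256);

    unsigned char *out = gpu_contrastu8(in, NULL, N, thresh, delta);

    // CPU reference: the same clamped contrast stretch as the kernel.
    unsigned int mismatches = 0;
    for (unsigned int i = 0; i < N; ++i) {
        unsigned char ref;
        if (in[i] >= thresh) ref = (in[i] + delta >= 255) ? 255 : (unsigned char)(in[i] + delta);
        else                 ref = (in[i] <= delta)       ? 0   : (unsigned char)(in[i] - delta);
        if (ref != out[i]) ++mismatches;
    }
    printf("mismatches: %u\n", mismatches);

    free(out);
    free(in);
    return 0;
}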
32e7e2720bf383e7601696c2914041096321a4c4.cu
//
// Threshold-based Contrasting GPU implementation
//

#include <unistd.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 128

// CUDA kernel declaration
__global__ void cuda_contrast_kernel(unsigned char *u8data, unsigned char *u8res,
                                     unsigned int N, unsigned int thresh, unsigned int delta);

// C/C++ Wrapper
unsigned char *gpu_contrastu8(unsigned char *u8data, unsigned char *u8res,
                              unsigned int N, unsigned int thresh, unsigned int delta)
{
    // Host memory
    unsigned char *host_u8res;
    // Device memory
    unsigned char *dev_u8data;
    unsigned char *dev_u8res;

    size_t size = N * sizeof(unsigned char);

    host_u8res = (unsigned char *)malloc(size);

    // Allocate device memory
    cudaMalloc((void **)&dev_u8data, size);
    cudaMalloc((void **)&dev_u8res, size);

    // Upload data to device memory
    cudaMemcpy(dev_u8data, u8data, size, cudaMemcpyHostToDevice);

    cuda_contrast_kernel<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
                           THREADS_PER_BLOCK>>>(dev_u8data, dev_u8res, N, thresh, delta);

    cudaMemcpy(host_u8res, dev_u8res, size, cudaMemcpyDeviceToHost);

    cudaFree(dev_u8data);
    cudaFree(dev_u8res);

    return host_u8res;
}

// CUDA kernel
__global__ void cuda_contrast_kernel(unsigned char *u8data, unsigned char *u8res,
                                     unsigned int N, unsigned int thresh, unsigned int delta)
{
    int idx = threadIdx.x + (blockDim.x * blockIdx.x);

    if (idx < N) {
        if (u8data[idx] >= thresh) {
            // Brighten, clamping at 255.
            if (u8data[idx] + delta >= 255) {
                u8res[idx] = 255;
            } else {
                u8res[idx] = u8data[idx] + delta;
            }
        } else {
            // Darken, clamping at 0. Compare instead of subtracting first:
            // u8data[idx] - delta is evaluated in unsigned arithmetic, so the
            // original "<= 0" test never caught an underflow.
            if (u8data[idx] <= delta) {
                u8res[idx] = 0;
            } else {
                u8res[idx] = u8data[idx] - delta;
            }
        }
    }
}
4d95934fbcae66b61a26bc32d1e9ba13b54c851e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <errno.h> #include <omp.h> #include <semaphore.h> #ifdef MPI_ON #include <mpi.h> #endif #include "chooseV.h" #include "signal.h" int* mapNodeSize; //============================================= ftype* __restrict__ hostKpmlx1; ftype* __restrict__ hostKpmlx2; ftype* __restrict__ hostKpmly1; ftype* __restrict__ hostKpmly2; ftype* __restrict__ hostKpmlz1; ftype* __restrict__ hostKpmlz2; GeoParamsHost parsHost; __constant__ GeoParams pars; __constant__ uint32_t drop_cells[Ns*NDT*Nwarps]; __constant__ int devNStripe[NDev] = STRIPES; __constant__ ftype Kpmlx1[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmlx2[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmly1[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmly2[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmlz1[(KNpmlz==0)?1:KNpmlz]; __constant__ ftype Kpmlz2[(KNpmlz==0)?1:KNpmlz]; //__shared__ ftype2 shared_fld[2][7][Nz]; //__shared__ ftype2 shared_fld[(FTYPESIZE*Nv*28>0xc000)?7:14][Nv]; __shared__ ftype2 shared_fld[SHARED_SIZE][Nv]; texture<char, hipTextureType3D> index_tex; hipArray* index_texArray=0; #include "window.hpp" struct AsyncMPIexch{ int even,ix,t0,Nt,mpirank; bool do_run; sem_t sem_mpi, sem_calc; void exch(const int _even, const int _ix, const int _t0, const int _Nt, const int _mpirank) { even=_even; ix=_ix; t0=_t0; Nt=_Nt, mpirank=_mpirank; if(sem_post(&sem_mpi)<0) printf("exch sem_post error %d\n",errno); } void exch_sync(){ if(sem_wait(&sem_calc)<0) printf("exch_sync sem error %d\n",errno); } void run() { if(sem_wait(&sem_mpi)<0) printf("run sem_wait error %d\n",errno); if(do_run==0) return; if(even==0) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::SendMPIp(mpirank, ixrag); if(even==1) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::SendMPIm(mpirank, ixrag); if(sem_post(&sem_calc)<0) printf("run sem_post error %d\n",errno);; } } ampi_exch; #define IFPMLS(func,a,b,c,d,args) {/*printf(#func" idev=%d ix=%d iym=%d Nblocks=%d\n", idev,ix, iym, a);*/if(isPMLs)hipLaunchKernelGGL(( PMLS##func), dim3(a),dim3(b),c,d, args; else func, dim3(a),dim3(b),c,d, args; } //#define IFPMLSfunc,a,b,c,d,args) { if(!isPMLs)hipLaunchKernelGGL(( func), dim3(a),dim3(b),c,d, args; } //#define IFPMLSfunc,a,b,c,d,args) func<<<a,b,c,d>>>args; template<int even> inline void Window::Dtorre(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { if(Nt<=t0 || Nt<=0) return; DEBUG_PRINT(("Dtorre%d isPMLs=%d isTFSF=%d ix=%d, t0=%d Nt=%d wleft=%d\n", even, isPMLs, isTFSF, ix,t0,Nt, parsHost.wleft)); const int Nth=Nv; double tt1 = omp_get_wtime(); CHECK_ERROR( hipSetDevice(0) ); hipStream_t stI ; CHECK_ERROR( hipStreamCreate(&stI ) ); hipStream_t stPMLm; CHECK_ERROR( hipStreamCreate(&stPMLm) ); hipStream_t stB; CHECK_ERROR( hipStreamCreate(&stB) ); hipStream_t stDm[NDev],stDo[NDev]; for(int i=0;i<NDev;i++) { if(i!=0) CHECK_ERROR( hipSetDevice(i) ); CHECK_ERROR( hipStreamCreate(&stDm[i]) ); CHECK_ERROR( hipStreamCreate(&stDo[i]) ); } hipStream_t stPMLp; CHECK_ERROR( hipStreamCreate(&stPMLp) ); hipStream_t stX ; CHECK_ERROR( hipStreamCreate(&stX ) ); hipStream_t stP ; if(even==0) { hipSetDevice(NDev-1); CHECK_ERROR( hipStreamCreate(&stP ) ); } else if(even==1) { hipSetDevice(0 ); CHECK_ERROR( hipStreamCreate(&stP ) ); } CHECK_ERROR( hipSetDevice(0) ); int iym=0, iyp=0; int Nblk=0; iyp++; int Iy=iym, Xy, D1oy[NDev], D0oy[NDev], Dmy[NDev], DmBlk[NDev], Syb,Syt, SybBlk,SytBlk; int is_oneL[NDev], is_oneU[NDev], is_many[NDev], 
is_I[NDev], is_X[NDev], is_Sb[NDev], is_St[NDev], is_P[NDev], is_B[NDev]; for(int i=0; i<NDev; i++) { is_oneL[i]=0; is_oneU[i]=0; is_many[i]=0; is_I[i]=0; is_X[i]=0; is_Sb[i]=0; is_St[i]=0; is_P[i]=0; is_B[i]=0; } is_I[0]=1; iym=iyp; Nblk=0; while(iyp<Npmly/2) { iyp++; Nblk++; } if(Nblk>0) is_Sb[0]=1; Syb=iym; SybBlk=Nblk; for(int idev=0,nextY=0; idev<NDev; idev++) { nextY+=NStripe[idev]; if(idev==NDev-1) nextY-=max(1,Npmly/2); if(idev!=0) { // Dtorre1 only if(iyp<nextY && even==1) is_oneL[idev]=1; D1oy[idev]=iyp; if(iyp<nextY) iyp++; } iym=iyp; Nblk=0; while(iyp<nextY-(idev==NDev-1?0:1)) { iyp++; Nblk++; } // Main Region if(Nblk>0) is_many[idev]=1; Dmy[idev]=iym, DmBlk[idev]=Nblk; if(idev!=NDev-1) { // Dtorre0 only if(iyp<nextY && even==0) is_oneU[idev]=1; D0oy[idev]=iyp; if(iyp<nextY) iyp++; } } iym=iyp; Nblk=0; while(iyp<Na-1) { iyp++; Nblk++; } if(Nblk>0) is_St[NDev-1]=1; is_X[NDev-1]=1; Syt=iym; SytBlk=Nblk; Xy=iyp; if(subnode!=0) { is_I [0]=0; if(even==1) is_P[0]=1; is_Sb[0]=0; DmBlk[0]+=SybBlk; Dmy[0]=Syb; } if(subnode!=NasyncNodes-1) { is_X [NDev-1]=0; if(even==0) is_P[NDev-1]=1; is_St[NDev-1]=0; DmBlk[NDev-1]+=SytBlk; } is_B[0]=1; Dmy[0]++; DmBlk[0]--; int mpirank = node*NasyncNodes+subnode; for(int idev=0; idev<NDev; idev++) { if(idev!=0) CHECK_ERROR( hipSetDevice(idev) ); if(is_oneL[idev] && even==1 && isTFSF) IFPMLS(torreTFSF1 ,1 ,Nth,0,stDo[idev],(ix,D1oy[idev],Nt,t0)) if(is_oneL[idev] && even==1 && !isTFSF) IFPMLS(torreD1 ,1 ,Nth,0,stDo[idev],(ix,D1oy[idev],Nt,t0)) if(is_oneU[idev] && even==0 && isTFSF) IFPMLS(torreTFSF0 ,1 ,Nth,0,stDo[idev],(ix,D0oy[idev],Nt,t0)) if(is_oneU[idev] && even==0 && !isTFSF) IFPMLS(torreD0 ,1 ,Nth,0,stDo[idev],(ix,D0oy[idev],Nt,t0)) if(is_I[idev] && even==0 && isTFSF) IFPMLS(torreITFSF0,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==0 && !isTFSF) IFPMLS(torreI0 ,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==1 && isTFSF) IFPMLS(torreITFSF1,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==1 && !isTFSF) IFPMLS(torreI1 ,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_X[idev] && even==0 ) IFPMLS(torreX0 ,1 ,Nth,0,stX ,(ix,Xy ,Nt,t0)) if(is_X[idev] && even==1 ) IFPMLS(torreX1 ,1 ,Nth,0,stX ,(ix,Xy ,Nt,t0)) if(is_P[idev] && even==0 ) IFPMLS(torreD0 ,1 ,Nth,0,stP ,(ix,Xy ,Nt,t0)) if(is_P[idev] && even==1 ) IFPMLS(torreD1 ,1 ,Nth,0,stP ,(ix,Iy ,Nt,t0)) if(is_Sb[idev] && even==0 ) IFPMLS(torreS0 ,SybBlk ,Nth,0,stPMLm ,(ix,Syb ,Nt,t0)) if(is_Sb[idev] && even==1 ) IFPMLS(torreS1 ,SybBlk ,Nth,0,stPMLm ,(ix,Syb ,Nt,t0)) if(is_St[idev] && even==0 ) IFPMLS(torreS0 ,SytBlk ,Nth,0,stPMLp ,(ix,Syt ,Nt,t0)) if(is_St[idev] && even==1 ) IFPMLS(torreS1 ,SytBlk ,Nth,0,stPMLp ,(ix,Syt ,Nt,t0)) if(is_B[idev] && even==0 ) IFPMLS(torreB0 ,1 ,Nth,0,stB ,(ix,Dmy[idev]-1,Nt,t0)) if(is_B[idev] && even==1 ) IFPMLS(torreB1 ,1 ,Nth,0,stB ,(ix,Dmy[idev]-1,Nt,t0)) if(is_many[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==0 && !isTFSF) IFPMLS(torreD0 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==1 && !isTFSF) IFPMLS(torreD1 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_oneL[idev] && even==1 ) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::copyM(idev, ixrag, stDo[idev]); if(is_oneU[idev] && even==0 ) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::copyP(idev, ixrag, stDo[idev]); } CHECK_ERROR( hipSetDevice(0) ); if(!doneMemcopy) { 
if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); CHECK_ERROR( hipStreamSynchronize(streamCopy) ); if(even==1) doneMemcopy=true; } CHECK_ERROR( hipStreamSynchronize(stP ) ); if(NasyncNodes>1) ampi_exch.exch(even, ix, t0, Nt, node*NasyncNodes+subnode); if(even==1) parsHost.drop.save(stPMLm); CHECK_ERROR( hipStreamSynchronize(stPMLm) ); CHECK_ERROR( hipStreamSynchronize(stPMLp) ); CHECK_ERROR( hipStreamSynchronize(stI ) ); CHECK_ERROR( hipStreamSynchronize(stX ) ); CHECK_ERROR( hipStreamSynchronize(stB ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamSynchronize(stDo[i]) ); for(int i=0;i<NDev;i++) { double tt=omp_get_wtime(); CHECK_ERROR( hipStreamSynchronize(stDm[i]) ); disbal[i]+=omp_get_wtime()-tt; } CHECK_ERROR( hipStreamDestroy(stPMLm) ); CHECK_ERROR( hipStreamDestroy(stPMLp) ); CHECK_ERROR( hipStreamDestroy(stI ) ); CHECK_ERROR( hipStreamDestroy(stX ) ); CHECK_ERROR( hipStreamDestroy(stB ) ); CHECK_ERROR( hipStreamDestroy(stP ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamDestroy(stDo[i]) ); for(int i=0;i<NDev;i++) CHECK_ERROR( hipStreamDestroy(stDm[i]) ); if(NasyncNodes>1) ampi_exch.exch_sync(); } inline void Window::Dtorres(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { Dtorre<0>(ix,Nt,t0,disbal,isPMLs,isTFSF); //hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); Dtorre<1>(ix,Nt,t0,disbal,isPMLs,isTFSF); //hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); } #ifdef MPI_ON MPI_Request reqSp, reqSm, reqRp, reqRm, reqSp_pml, reqSm_pml, reqRp_pml, reqRm_pml; MPI_Status status; int flag; mpi_message Window::mes[8]; //#define BLOCK_SEND #define MPI_TEST //#define MPI_NUDGE //#define USE_MPI_THREADING #ifdef BLOCK_SEND #define SendMPI(p,sz,tp,rnk,tag,world,req) MPI_Send(p,sz,tp,rnk,tag,world); #define RecvMPI(p,sz,tp,rnk,tag,world,req) MPI_Recv(p,sz,tp,rnk,tag,world,&status); #define doWait 0 #else #ifndef USE_MPI_THREADING #define WaitMPI(nreq,req,st) MPI_Wait(req,st) #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Isend(p,sz,tp,rnk,tag,world,req); #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Irecv(p,sz,tp,rnk,tag,world,req); #else #define WaitMPI(nreq,req,st) { mpi_message* mes = &window.mes[nreq]; \ int s=pthread_join(mes->mpith,0); if(s!=0) printf("node %d: Error joining thread %ld retcode=%d\n",window.node,mes->mpith,s); } static void* send_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Send(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm); return 0; } static void* recv_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Status stat; MPI_Recv(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm,&stat); return 0; } #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,send_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_send %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,recv_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_recv %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #endif//USE_MPI_THREADING #define doWait 1 #endif #endif// MPI_ON int calcStep(){ // CHECK_ERROR( hipDeviceSetSharedMemConfig ( hipSharedMemBankSizeEightByte ) ); if(parsHost.iStep==0) printf("Starting...\n"); cuTimer t0; int torreNum=0; CHECK_ERROR(hipDeviceSynchronize()); 
#ifdef TEST_RATE for(int ix=Ns-Ntime; ix>0; ix--) { // printf("ix=%d\n",ix); hipLaunchKernelGGL(( torreD0), dim3(Na-2),dim3(Nv), 0, 0, ix, 1, Ntime, 0); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); hipLaunchKernelGGL(( torreD1), dim3(Na-2),dim3(Nv), 0, 0, ix, 1, Ntime, 0); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); torreNum++; } #else Window window; window.prepare(); int node_shift=0; for(int inode=0; inode<window.node; inode++) node_shift+= mapNodeSize[inode]; node_shift-= Ns*window.node; int nsize=mapNodeSize[window.node]; int nL=node_shift; int nR=nL+nsize; #ifdef MPI_ON if(parsHost.iStep==0) { int wleftP=nR-Ns; int wleftM=nL; if(window.node!=window.Nprocs-1) { DEBUG_MPI(("Recv P (node %d) wleft=%d\n", window.node, wleftP)); //MPI_Isend(&window.data [wleftP*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+0, MPI_COMM_WORLD, &reqSp); //MPI_Isend(&window.dataPMLa[wleftP*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+1, MPI_COMM_WORLD, &reqSp_pml); #ifndef BLOCK_SEND int doSR=1; #ifdef MPI_TEST doSR=0; #endif RecvMPI(&window.data [wleftP*Na ], doSR*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0, MPI_COMM_WORLD, &reqRp , 2); RecvMPI(&window.dataPMLa[wleftP*Npmly], doSR*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+1, MPI_COMM_WORLD, &reqRp_pml, 6); #endif } if(window.node!=0 ) { //DEBUG_MPI(("Send&Recv M (node %d) wleft=%d\n", window.node, wleftM)); //MPI_Isend(&window.data [wleftM*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2-2, MPI_COMM_WORLD, &reqSm); //MPI_Isend(&window.dataPMLa[wleftM*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2-1, MPI_COMM_WORLD, &reqSm_pml); //MPI_Irecv(&window.data [wleftM*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+0, MPI_COMM_WORLD, &reqRm); //MPI_Irecv(&window.dataPMLa[wleftM*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+1, MPI_COMM_WORLD, &reqRm_pml); } } #endif while(window.w0+Ns>=0) { // window.Memcopy(); #ifdef MPI_ON #ifdef BLOCK_SEND if( !(parsHost.wleft>=nR && window.node!=window.Nprocs-1 || parsHost.wleft<nL-Ns && window.node!=0) ) { if(parsHost.wleft==nR-1 && window.node!=window.Nprocs-1) { DEBUG_MPI(("bl Recv P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+0)); RecvMPI(&window.data [(nR-Ns)*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqRp , 2); RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqRp_pml, 6); DEBUG_MPI(("Ok Recv P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+0)); } //if(parsHost.wleft==nR-Ns-Ns && window.node!=window.Nprocs-1) { if(parsHost.wleft==nL+Ns && window.node!=window.Nprocs-1) { DEBUG_MPI(("bl Send P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+1)); SendMPI(&window.data [(nR-Ns)*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0); SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml, 4); DEBUG_MPI(("Ok Send P(%d) (node 
%d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+1)); } if(parsHost.wleft==nL+Ns && window.node!=0 && parsHost.iStep!=0) { DEBUG_MPI(("bl Recv M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); RecvMPI(&window.data [ nL *Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqRm , 3); RecvMPI(&window.dataPMLa[ nL *Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqRm_pml, 7); DEBUG_MPI(("Ok Recv M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep-1)); } window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); if(parsHost.wleft==nL-Ns && window.node!=0 ) { DEBUG_MPI(("bl Send M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); SendMPI(&window.data [ nL *Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqSm , 1); SendMPI(&window.dataPMLa[ nL *Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqSm_pml, 5); DEBUG_MPI(("Ok Send M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); } } #else//BLOCK_SEND not def if( true /*!(parsHost.wleft>=nR && window.node!=window.Nprocs-1 || parsHost.wleft<nL-Ns && window.node!=0)*/ ) { bool doSend[2] = {1,1}; bool doRecv[2] = {1,1}; #ifdef MPI_TEST if(parsHost.iStep -window.node<0) { doSend[0]=0; doSend[1]=0; } if(parsHost.iStep+1-window.node<0) { doRecv[0]=0; doRecv[1]=0; } #endif if(doWait && parsHost.wleft==nR+(Ns-Ntime-1) ) { DEBUG_MPI(("waiting P (node %d) wleft=%d\n", window.node, parsHost.wleft)); if(window.node!=window.Nprocs-1 ) { WaitMPI(2,&reqRp, &status);WaitMPI(6,&reqRp_pml, &status); } if(window.node!=0 && parsHost.iStep!=0) { WaitMPI(1,&reqSm, &status);WaitMPI(5,&reqSm_pml, &status); } } if(parsHost.wleft==nR-Ns-Ns-1 && window.node!=window.Nprocs-1) { DEBUG_MPI(("Send&Recv P(%d) (node %d) wleft=%d\n", parsHost.wleft+Ns, window.node, parsHost.wleft)); SendMPI(&window.data [(nR-Ns)*Na ], doSend[1]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0); SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], doSend[1]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml,4); RecvMPI(&window.data [(nR-Ns)*Na ], doRecv[1]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRp ,2); RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], doRecv[1]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRp_pml,6); } if(doWait && parsHost.wleft==nL+Ns+(Ns-Ntime-1) && parsHost.iStep!=0) { DEBUG_MPI(("waiting M (node %d) wleft=%d\n", window.node, parsHost.wleft)); if(window.node!=0 ) { WaitMPI(3,&reqRm, &status);WaitMPI(7,&reqRm_pml, &status); } if(window.node!=window.Nprocs-1) { WaitMPI(0,&reqSp, &status);WaitMPI(4,&reqSp_pml, &status); } } #ifdef MPI_NUDGE if(doWait && (parsHost.wleft+Ns)%1==0) { if(parsHost.iStep!=0 && window.node!=window.Nprocs-1) { DEBUG_MPI(("testing sendP (node %d) wleft=%d\n", 
window.node, parsHost.wleft)); MPI_Test(&reqSp, &flag, &status);MPI_Test(&reqSp_pml, &flag, &status); } if( window.node!=window.Nprocs-1) { DEBUG_MPI(("testing recvP (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqRp, &flag, &status);MPI_Test(&reqRp_pml, &flag, &status); } if(parsHost.iStep!=0 && window.node!=0 ) { DEBUG_MPI(("testing sendM (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqSm, &flag, &status);MPI_Test(&reqSm_pml, &flag, &status); } if(parsHost.iStep!=0 && window.node!=0 ) { DEBUG_MPI(("testing recvM (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqRm, &flag, &status);MPI_Test(&reqRm_pml, &flag, &status); } } #endif #ifdef MPI_TEST if(parsHost.iStep-window.node>0) #endif ampi_exch.do_run=1; if(NasyncNodes>1) { if(sem_init(&ampi_exch.sem_calc, 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); if(sem_init(&ampi_exch.sem_mpi , 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); } #pragma omp parallel num_threads(2) { if(omp_get_thread_num()==1) { window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); ampi_exch.do_run=0; if(NasyncNodes>1) if(sem_post(&ampi_exch.sem_mpi)<0) printf("sem_post_mpi end error %d\n",errno); } #pragma omp master if(NasyncNodes>1) { while(ampi_exch.do_run) ampi_exch.run(); if(sem_post(&ampi_exch.sem_calc)<0) printf("sem_post_calc end error %d\n",errno); } } if(NasyncNodes>1) { if(sem_destroy(&ampi_exch.sem_mpi )<0) printf("sem_destroy error %d\n",errno); if(sem_destroy(&ampi_exch.sem_calc)<0) printf("sem_destroy error %d\n",errno); } if(parsHost.wleft==nL-Ns-1 && window.node!=0 ) { DEBUG_MPI(("Send&Recv M(%d) (node %d) wleft=%d\n", parsHost.wleft+Ns+1, window.node, parsHost.wleft)); SendMPI(&window.data [ nL *Na ], doSend[0]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+0, MPI_COMM_WORLD, &reqSm ,1); SendMPI(&window.dataPMLa[ nL *Npmly], doSend[0]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+1, MPI_COMM_WORLD, &reqSm_pml,5); RecvMPI(&window.data [ nL *Na ], doRecv[0]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRm, 3); RecvMPI(&window.dataPMLa[ nL *Npmly], doRecv[0]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRm_pml,7); } } #endif//BLOCK_SEND #else//MPI_ON not def window.calcDtorres(); #endif//MPI_ON window.synchronize(); } parsHost.drop.dump(); #ifndef MPI_TEST if(0 && parsHost.iStep%(10*window.Nprocs)==0) parsHost.drop.sync(); #endif // printf("ix=%d\n",ix); /* int zones[] = {0, Npmlx/2, tfsfSm/dx/NDT-2, tfsfSp/dx/NDT+2, Ns-Npmlx/2, Ns}; int izon=0; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), true ); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false, ((parsHost.iStep+1)*Ntime*dt<shotpoint.tStop)?true:false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), true );*/ #endif double calcTime=t0.gettime(); double yee_cells = 0; double overhead=0; #ifndef TEST_RATE 
yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*Na)*Np; overhead = window.RAMcopytime/window.GPUcalctime; printf("Step %d /node %d/ subnode %d/: Time %9.09f ms |overhead %3.03f%% | ", parsHost.iStep, window.node, window.subnode, calcTime, 100*overhead); // for(int idev=0;idev<NDev;idev++) printf("%3.03f%% ", 100*window.disbal[idev]/window.GPUcalctime); printf("|rate %9.09f GYee_cells/sec |isTFSF=%d \n", 1.e-9*yee_cells/(calcTime*1.e-3), (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #else yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*(Na-2))*torreNum; printf("Step %d: Time %9.09f ms |overhead %3.03f%% |rate %9.09f %d %d %d %d (GYee cells/sec,Nx,Ny,Nz,Ntime) |isTFSF=%d \n", parsHost.iStep, calcTime, 100*overhead, 1.e-9*yee_cells/(calcTime*1.e-3), Nx,Ny,Nz,Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #endif #ifdef MPI_ON double AllCalcTime; MPI_Reduce(&calcTime, &AllCalcTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if(window.node==0 && 0) printf("===(%3d)===AllCalcTime %9.09f sec |rate %9.09f GYee_cells/sec\n", parsHost.iStep, AllCalcTime*1e-3, 1.e-9*yee_cells/(AllCalcTime*1.e-3) ); #endif parsHost.iStep++; copy2dev(parsHost, pars); return 0; }
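// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original source): the two-semaphore handshake
// used by AsyncMPIexch above, reduced to a standalone example. The compute
// thread posts one semaphore to hand a "job" to the communication thread
// (exch()) and later blocks on the other (exch_sync()); the communication
// thread blocks on the request semaphore, does the work (a printf stands in
// for DiamondRag::SendMPIp/SendMPIm), and posts completion. Names and the job
// payload are illustrative; do_run is made atomic here, unlike the original.
// Build with -pthread.
// ---------------------------------------------------------------------------
#include <semaphore.h>
#include <cstdio>
#include <thread>
#include <atomic>

struct AsyncExchSketch {
  sem_t sem_req, sem_done;
  int job = 0;
  std::atomic<bool> do_run{true};

  void request(int j) {              // called from the compute thread
    job = j;
    if (sem_post(&sem_req) < 0) perror("sem_post(req)");
  }
  void wait_done() {                 // compute thread blocks until comm finished
    if (sem_wait(&sem_done) < 0) perror("sem_wait(done)");
  }
  void serve_once() {                // body of the communication thread loop
    if (sem_wait(&sem_req) < 0) perror("sem_wait(req)");
    if (!do_run) return;
    std::printf("comm thread: handling job %d\n", job);   // MPI sends go here
    if (sem_post(&sem_done) < 0) perror("sem_post(done)");
  }
};

int main() {
  AsyncExchSketch ex;
  sem_init(&ex.sem_req, 0, 0);
  sem_init(&ex.sem_done, 0, 0);

  std::thread comm([&]{ while (ex.do_run) ex.serve_once(); });

  for (int step = 0; step < 3; ++step) {   // the "compute" thread
    ex.request(step);                      // kernels would overlap with comm here
    ex.wait_done();
  }
  ex.do_run = false;
  sem_post(&ex.sem_req);                   // wake the comm thread so it can exit
  comm.join();

  sem_destroy(&ex.sem_req);
  sem_destroy(&ex.sem_done);
  return 0;
}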
4d95934fbcae66b61a26bc32d1e9ba13b54c851e.cu
#include <stdio.h> #include <errno.h> #include <omp.h> #include <semaphore.h> #ifdef MPI_ON #include <mpi.h> #endif #include "chooseV.h" #include "signal.h" int* mapNodeSize; //============================================= ftype* __restrict__ hostKpmlx1; ftype* __restrict__ hostKpmlx2; ftype* __restrict__ hostKpmly1; ftype* __restrict__ hostKpmly2; ftype* __restrict__ hostKpmlz1; ftype* __restrict__ hostKpmlz2; GeoParamsHost parsHost; __constant__ GeoParams pars; __constant__ uint32_t drop_cells[Ns*NDT*Nwarps]; __constant__ int devNStripe[NDev] = STRIPES; __constant__ ftype Kpmlx1[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmlx2[(KNpmlx==0)?1:KNpmlx]; __constant__ ftype Kpmly1[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmly2[(KNpmly==0)?1:KNpmly]; __constant__ ftype Kpmlz1[(KNpmlz==0)?1:KNpmlz]; __constant__ ftype Kpmlz2[(KNpmlz==0)?1:KNpmlz]; //__shared__ ftype2 shared_fld[2][7][Nz]; //__shared__ ftype2 shared_fld[(FTYPESIZE*Nv*28>0xc000)?7:14][Nv]; __shared__ ftype2 shared_fld[SHARED_SIZE][Nv]; texture<char, cudaTextureType3D> index_tex; cudaArray* index_texArray=0; #include "window.hpp" struct AsyncMPIexch{ int even,ix,t0,Nt,mpirank; bool do_run; sem_t sem_mpi, sem_calc; void exch(const int _even, const int _ix, const int _t0, const int _Nt, const int _mpirank) { even=_even; ix=_ix; t0=_t0; Nt=_Nt, mpirank=_mpirank; if(sem_post(&sem_mpi)<0) printf("exch sem_post error %d\n",errno); } void exch_sync(){ if(sem_wait(&sem_calc)<0) printf("exch_sync sem error %d\n",errno); } void run() { if(sem_wait(&sem_mpi)<0) printf("run sem_wait error %d\n",errno); if(do_run==0) return; if(even==0) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::SendMPIp(mpirank, ixrag); if(even==1) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::SendMPIm(mpirank, ixrag); if(sem_post(&sem_calc)<0) printf("run sem_post error %d\n",errno);; } } ampi_exch; #define IFPMLS(func,a,b,c,d,args) {/*printf(#func" idev=%d ix=%d iym=%d Nblocks=%d\n", idev,ix, iym, a);*/if(isPMLs) PMLS##func<<<a,b,c,d>>>args; else func<<<a,b,c,d>>>args; } //#define IFPMLS(func,a,b,c,d,args) { if(!isPMLs) func<<<a,b,c,d>>>args; } //#define IFPMLS(func,a,b,c,d,args) func<<<a,b,c,d>>>args; template<int even> inline void Window::Dtorre(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { if(Nt<=t0 || Nt<=0) return; DEBUG_PRINT(("Dtorre%d isPMLs=%d isTFSF=%d ix=%d, t0=%d Nt=%d wleft=%d\n", even, isPMLs, isTFSF, ix,t0,Nt, parsHost.wleft)); const int Nth=Nv; double tt1 = omp_get_wtime(); CHECK_ERROR( cudaSetDevice(0) ); cudaStream_t stI ; CHECK_ERROR( cudaStreamCreate(&stI ) ); cudaStream_t stPMLm; CHECK_ERROR( cudaStreamCreate(&stPMLm) ); cudaStream_t stB; CHECK_ERROR( cudaStreamCreate(&stB) ); cudaStream_t stDm[NDev],stDo[NDev]; for(int i=0;i<NDev;i++) { if(i!=0) CHECK_ERROR( cudaSetDevice(i) ); CHECK_ERROR( cudaStreamCreate(&stDm[i]) ); CHECK_ERROR( cudaStreamCreate(&stDo[i]) ); } cudaStream_t stPMLp; CHECK_ERROR( cudaStreamCreate(&stPMLp) ); cudaStream_t stX ; CHECK_ERROR( cudaStreamCreate(&stX ) ); cudaStream_t stP ; if(even==0) { cudaSetDevice(NDev-1); CHECK_ERROR( cudaStreamCreate(&stP ) ); } else if(even==1) { cudaSetDevice(0 ); CHECK_ERROR( cudaStreamCreate(&stP ) ); } CHECK_ERROR( cudaSetDevice(0) ); int iym=0, iyp=0; int Nblk=0; iyp++; int Iy=iym, Xy, D1oy[NDev], D0oy[NDev], Dmy[NDev], DmBlk[NDev], Syb,Syt, SybBlk,SytBlk; int is_oneL[NDev], is_oneU[NDev], is_many[NDev], is_I[NDev], is_X[NDev], is_Sb[NDev], is_St[NDev], is_P[NDev], is_B[NDev]; for(int i=0; i<NDev; i++) { is_oneL[i]=0; is_oneU[i]=0; is_many[i]=0; 
is_I[i]=0; is_X[i]=0; is_Sb[i]=0; is_St[i]=0; is_P[i]=0; is_B[i]=0; } is_I[0]=1; iym=iyp; Nblk=0; while(iyp<Npmly/2) { iyp++; Nblk++; } if(Nblk>0) is_Sb[0]=1; Syb=iym; SybBlk=Nblk; for(int idev=0,nextY=0; idev<NDev; idev++) { nextY+=NStripe[idev]; if(idev==NDev-1) nextY-=max(1,Npmly/2); if(idev!=0) { // Dtorre1 only if(iyp<nextY && even==1) is_oneL[idev]=1; D1oy[idev]=iyp; if(iyp<nextY) iyp++; } iym=iyp; Nblk=0; while(iyp<nextY-(idev==NDev-1?0:1)) { iyp++; Nblk++; } // Main Region if(Nblk>0) is_many[idev]=1; Dmy[idev]=iym, DmBlk[idev]=Nblk; if(idev!=NDev-1) { // Dtorre0 only if(iyp<nextY && even==0) is_oneU[idev]=1; D0oy[idev]=iyp; if(iyp<nextY) iyp++; } } iym=iyp; Nblk=0; while(iyp<Na-1) { iyp++; Nblk++; } if(Nblk>0) is_St[NDev-1]=1; is_X[NDev-1]=1; Syt=iym; SytBlk=Nblk; Xy=iyp; if(subnode!=0) { is_I [0]=0; if(even==1) is_P[0]=1; is_Sb[0]=0; DmBlk[0]+=SybBlk; Dmy[0]=Syb; } if(subnode!=NasyncNodes-1) { is_X [NDev-1]=0; if(even==0) is_P[NDev-1]=1; is_St[NDev-1]=0; DmBlk[NDev-1]+=SytBlk; } is_B[0]=1; Dmy[0]++; DmBlk[0]--; int mpirank = node*NasyncNodes+subnode; for(int idev=0; idev<NDev; idev++) { if(idev!=0) CHECK_ERROR( cudaSetDevice(idev) ); if(is_oneL[idev] && even==1 && isTFSF) IFPMLS(torreTFSF1 ,1 ,Nth,0,stDo[idev],(ix,D1oy[idev],Nt,t0)) if(is_oneL[idev] && even==1 && !isTFSF) IFPMLS(torreD1 ,1 ,Nth,0,stDo[idev],(ix,D1oy[idev],Nt,t0)) if(is_oneU[idev] && even==0 && isTFSF) IFPMLS(torreTFSF0 ,1 ,Nth,0,stDo[idev],(ix,D0oy[idev],Nt,t0)) if(is_oneU[idev] && even==0 && !isTFSF) IFPMLS(torreD0 ,1 ,Nth,0,stDo[idev],(ix,D0oy[idev],Nt,t0)) if(is_I[idev] && even==0 && isTFSF) IFPMLS(torreITFSF0,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==0 && !isTFSF) IFPMLS(torreI0 ,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==1 && isTFSF) IFPMLS(torreITFSF1,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_I[idev] && even==1 && !isTFSF) IFPMLS(torreI1 ,1 ,Nth,0,stI ,(ix,Iy ,Nt,t0)) if(is_X[idev] && even==0 ) IFPMLS(torreX0 ,1 ,Nth,0,stX ,(ix,Xy ,Nt,t0)) if(is_X[idev] && even==1 ) IFPMLS(torreX1 ,1 ,Nth,0,stX ,(ix,Xy ,Nt,t0)) if(is_P[idev] && even==0 ) IFPMLS(torreD0 ,1 ,Nth,0,stP ,(ix,Xy ,Nt,t0)) if(is_P[idev] && even==1 ) IFPMLS(torreD1 ,1 ,Nth,0,stP ,(ix,Iy ,Nt,t0)) if(is_Sb[idev] && even==0 ) IFPMLS(torreS0 ,SybBlk ,Nth,0,stPMLm ,(ix,Syb ,Nt,t0)) if(is_Sb[idev] && even==1 ) IFPMLS(torreS1 ,SybBlk ,Nth,0,stPMLm ,(ix,Syb ,Nt,t0)) if(is_St[idev] && even==0 ) IFPMLS(torreS0 ,SytBlk ,Nth,0,stPMLp ,(ix,Syt ,Nt,t0)) if(is_St[idev] && even==1 ) IFPMLS(torreS1 ,SytBlk ,Nth,0,stPMLp ,(ix,Syt ,Nt,t0)) if(is_B[idev] && even==0 ) IFPMLS(torreB0 ,1 ,Nth,0,stB ,(ix,Dmy[idev]-1,Nt,t0)) if(is_B[idev] && even==1 ) IFPMLS(torreB1 ,1 ,Nth,0,stB ,(ix,Dmy[idev]-1,Nt,t0)) if(is_many[idev] && even==0 && isTFSF ) IFPMLS(torreTFSF0 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==1 && isTFSF ) IFPMLS(torreTFSF1 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==0 && !isTFSF) IFPMLS(torreD0 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_many[idev] && even==1 && !isTFSF) IFPMLS(torreD1 ,DmBlk[idev],Nth,0,stDm[idev],(ix,Dmy[idev] ,Nt,t0)) if(is_oneL[idev] && even==1 ) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::copyM(idev, ixrag, stDo[idev]); if(is_oneU[idev] && even==0 ) for(int ixrag=ix; ixrag<ix+Nt-t0; ixrag++) DiamondRag::copyP(idev, ixrag, stDo[idev]); } CHECK_ERROR( cudaSetDevice(0) ); if(!doneMemcopy) { if(even==0) MemcopyDtH(ix4copy); if(even==1) MemcopyHtD(ix4copy); CHECK_ERROR( cudaStreamSynchronize(streamCopy) ); if(even==1) doneMemcopy=true; 
} CHECK_ERROR( cudaStreamSynchronize(stP ) ); if(NasyncNodes>1) ampi_exch.exch(even, ix, t0, Nt, node*NasyncNodes+subnode); if(even==1) parsHost.drop.save(stPMLm); CHECK_ERROR( cudaStreamSynchronize(stPMLm) ); CHECK_ERROR( cudaStreamSynchronize(stPMLp) ); CHECK_ERROR( cudaStreamSynchronize(stI ) ); CHECK_ERROR( cudaStreamSynchronize(stX ) ); CHECK_ERROR( cudaStreamSynchronize(stB ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamSynchronize(stDo[i]) ); for(int i=0;i<NDev;i++) { double tt=omp_get_wtime(); CHECK_ERROR( cudaStreamSynchronize(stDm[i]) ); disbal[i]+=omp_get_wtime()-tt; } CHECK_ERROR( cudaStreamDestroy(stPMLm) ); CHECK_ERROR( cudaStreamDestroy(stPMLp) ); CHECK_ERROR( cudaStreamDestroy(stI ) ); CHECK_ERROR( cudaStreamDestroy(stX ) ); CHECK_ERROR( cudaStreamDestroy(stB ) ); CHECK_ERROR( cudaStreamDestroy(stP ) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamDestroy(stDo[i]) ); for(int i=0;i<NDev;i++) CHECK_ERROR( cudaStreamDestroy(stDm[i]) ); if(NasyncNodes>1) ampi_exch.exch_sync(); } inline void Window::Dtorres(int ix, int Nt, int t0, double disbal[NDev], bool isPMLs, bool isTFSF) { Dtorre<0>(ix,Nt,t0,disbal,isPMLs,isTFSF); //cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); Dtorre<1>(ix,Nt,t0,disbal,isPMLs,isTFSF); //cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); } #ifdef MPI_ON MPI_Request reqSp, reqSm, reqRp, reqRm, reqSp_pml, reqSm_pml, reqRp_pml, reqRm_pml; MPI_Status status; int flag; mpi_message Window::mes[8]; //#define BLOCK_SEND #define MPI_TEST //#define MPI_NUDGE //#define USE_MPI_THREADING #ifdef BLOCK_SEND #define SendMPI(p,sz,tp,rnk,tag,world,req) MPI_Send(p,sz,tp,rnk,tag,world); #define RecvMPI(p,sz,tp,rnk,tag,world,req) MPI_Recv(p,sz,tp,rnk,tag,world,&status); #define doWait 0 #else #ifndef USE_MPI_THREADING #define WaitMPI(nreq,req,st) MPI_Wait(req,st) #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Isend(p,sz,tp,rnk,tag,world,req); #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) MPI_Irecv(p,sz,tp,rnk,tag,world,req); #else #define WaitMPI(nreq,req,st) { mpi_message* mes = &window.mes[nreq]; \ int s=pthread_join(mes->mpith,0); if(s!=0) printf("node %d: Error joining thread %ld retcode=%d\n",window.node,mes->mpith,s); } static void* send_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Send(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm); return 0; } static void* recv_func(void* args){ mpi_message *mes = (mpi_message*)args; MPI_Status stat; MPI_Recv(mes->buf,mes->count,mes->datatype,mes->dest,mes->tag,mes->comm,&stat); return 0; } #define SendMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,send_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_send %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #define RecvMPI(p,sz,tp,rnk,tag,world,req,nreq) {mpi_message* mes = &window.mes[nreq]; mes->set(p,sz,tp,rnk,tag,world); \ if(pthread_create(&mes->mpith,0,recv_func,(void*)mes)!=0) {printf("Error: cannot create thread for MPI_recv %d node=%d\n",nreq,window.node); MPI_Abort(MPI_COMM_WORLD, 1);};} #endif//USE_MPI_THREADING #define doWait 1 #endif #endif// MPI_ON int calcStep(){ // CHECK_ERROR( cudaDeviceSetSharedMemConfig ( cudaSharedMemBankSizeEightByte ) ); if(parsHost.iStep==0) printf("Starting...\n"); cuTimer t0; int torreNum=0; CHECK_ERROR(cudaDeviceSynchronize()); #ifdef TEST_RATE for(int ix=Ns-Ntime; ix>0; ix--) { // printf("ix=%d\n",ix); torreD0<<<Na-2,Nv>>>(ix, 1, Ntime, 0); 
cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); torreD1<<<Na-2,Nv>>>(ix, 1, Ntime, 0); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); torreNum++; } #else Window window; window.prepare(); int node_shift=0; for(int inode=0; inode<window.node; inode++) node_shift+= mapNodeSize[inode]; node_shift-= Ns*window.node; int nsize=mapNodeSize[window.node]; int nL=node_shift; int nR=nL+nsize; #ifdef MPI_ON if(parsHost.iStep==0) { int wleftP=nR-Ns; int wleftM=nL; if(window.node!=window.Nprocs-1) { DEBUG_MPI(("Recv P (node %d) wleft=%d\n", window.node, wleftP)); //MPI_Isend(&window.data [wleftP*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+0, MPI_COMM_WORLD, &reqSp); //MPI_Isend(&window.dataPMLa[wleftP*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+1, MPI_COMM_WORLD, &reqSp_pml); #ifndef BLOCK_SEND int doSR=1; #ifdef MPI_TEST doSR=0; #endif RecvMPI(&window.data [wleftP*Na ], doSR*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+0, MPI_COMM_WORLD, &reqRp , 2); RecvMPI(&window.dataPMLa[wleftP*Npmly], doSR*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+1, MPI_COMM_WORLD, &reqRp_pml, 6); #endif } if(window.node!=0 ) { //DEBUG_MPI(("Send&Recv M (node %d) wleft=%d\n", window.node, wleftM)); //MPI_Isend(&window.data [wleftM*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2-2, MPI_COMM_WORLD, &reqSm); //MPI_Isend(&window.dataPMLa[wleftM*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2-1, MPI_COMM_WORLD, &reqSm_pml); //MPI_Irecv(&window.data [wleftM*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+0, MPI_COMM_WORLD, &reqRm); //MPI_Irecv(&window.dataPMLa[wleftM*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+1, MPI_COMM_WORLD, &reqRm_pml); } } #endif while(window.w0+Ns>=0) { // window.Memcopy(); #ifdef MPI_ON #ifdef BLOCK_SEND if( !(parsHost.wleft>=nR && window.node!=window.Nprocs-1 || parsHost.wleft<nL-Ns && window.node!=0) ) { if(parsHost.wleft==nR-1 && window.node!=window.Nprocs-1) { DEBUG_MPI(("bl Recv P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+0)); RecvMPI(&window.data [(nR-Ns)*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqRp , 2); RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqRp_pml, 6); DEBUG_MPI(("Ok Recv P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+0)); } //if(parsHost.wleft==nR-Ns-Ns && window.node!=window.Nprocs-1) { if(parsHost.wleft==nL+Ns && window.node!=window.Nprocs-1) { DEBUG_MPI(("bl Send P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+1)); SendMPI(&window.data [(nR-Ns)*Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0); SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node+1, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml, 4); DEBUG_MPI(("Ok Send P(%d) (node %d) wleft=%d tag=%d\n", nR-Ns, window.node, parsHost.wleft, parsHost.iStep+1)); } if(parsHost.wleft==nL+Ns && window.node!=0 && parsHost.iStep!=0) { DEBUG_MPI(("bl Recv M(%d) (node %d) 
wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); RecvMPI(&window.data [ nL *Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqRm , 3); RecvMPI(&window.dataPMLa[ nL *Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqRm_pml, 7); DEBUG_MPI(("Ok Recv M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep-1)); } window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); if(parsHost.wleft==nL-Ns && window.node!=0 ) { DEBUG_MPI(("bl Send M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); SendMPI(&window.data [ nL *Na ], Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+0, MPI_COMM_WORLD, &reqSm , 1); SendMPI(&window.dataPMLa[ nL *Npmly], Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, window.node-1, 2+(parsHost.iStep+0)*2+1, MPI_COMM_WORLD, &reqSm_pml, 5); DEBUG_MPI(("Ok Send M(%d) (node %d) wleft=%d tag=%d\n", nL, window.node, parsHost.wleft, parsHost.iStep+0)); } } #else//BLOCK_SEND not def if( true /*!(parsHost.wleft>=nR && window.node!=window.Nprocs-1 || parsHost.wleft<nL-Ns && window.node!=0)*/ ) { bool doSend[2] = {1,1}; bool doRecv[2] = {1,1}; #ifdef MPI_TEST if(parsHost.iStep -window.node<0) { doSend[0]=0; doSend[1]=0; } if(parsHost.iStep+1-window.node<0) { doRecv[0]=0; doRecv[1]=0; } #endif if(doWait && parsHost.wleft==nR+(Ns-Ntime-1) ) { DEBUG_MPI(("waiting P (node %d) wleft=%d\n", window.node, parsHost.wleft)); if(window.node!=window.Nprocs-1 ) { WaitMPI(2,&reqRp, &status);WaitMPI(6,&reqRp_pml, &status); } if(window.node!=0 && parsHost.iStep!=0) { WaitMPI(1,&reqSm, &status);WaitMPI(5,&reqSm_pml, &status); } } if(parsHost.wleft==nR-Ns-Ns-1 && window.node!=window.Nprocs-1) { DEBUG_MPI(("Send&Recv P(%d) (node %d) wleft=%d\n", parsHost.wleft+Ns, window.node, parsHost.wleft)); SendMPI(&window.data [(nR-Ns)*Na ], doSend[1]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqSp ,0); SendMPI(&window.dataPMLa[(nR-Ns)*Npmly], doSend[1]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqSp_pml,4); RecvMPI(&window.data [(nR-Ns)*Na ], doRecv[1]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRp ,2); RecvMPI(&window.dataPMLa[(nR-Ns)*Npmly], doRecv[1]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node+1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRp_pml,6); } if(doWait && parsHost.wleft==nL+Ns+(Ns-Ntime-1) && parsHost.iStep!=0) { DEBUG_MPI(("waiting M (node %d) wleft=%d\n", window.node, parsHost.wleft)); if(window.node!=0 ) { WaitMPI(3,&reqRm, &status);WaitMPI(7,&reqRm_pml, &status); } if(window.node!=window.Nprocs-1) { WaitMPI(0,&reqSp, &status);WaitMPI(4,&reqSp_pml, &status); } } #ifdef MPI_NUDGE if(doWait && (parsHost.wleft+Ns)%1==0) { if(parsHost.iStep!=0 && window.node!=window.Nprocs-1) { DEBUG_MPI(("testing sendP (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqSp, &flag, &status);MPI_Test(&reqSp_pml, &flag, &status); } if( window.node!=window.Nprocs-1) { DEBUG_MPI(("testing recvP (node %d) wleft=%d\n", 
window.node, parsHost.wleft)); MPI_Test(&reqRp, &flag, &status);MPI_Test(&reqRp_pml, &flag, &status); } if(parsHost.iStep!=0 && window.node!=0 ) { DEBUG_MPI(("testing sendM (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqSm, &flag, &status);MPI_Test(&reqSm_pml, &flag, &status); } if(parsHost.iStep!=0 && window.node!=0 ) { DEBUG_MPI(("testing recvM (node %d) wleft=%d\n", window.node, parsHost.wleft)); MPI_Test(&reqRm, &flag, &status);MPI_Test(&reqRm_pml, &flag, &status); } } #endif #ifdef MPI_TEST if(parsHost.iStep-window.node>0) #endif ampi_exch.do_run=1; if(NasyncNodes>1) { if(sem_init(&ampi_exch.sem_calc, 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); if(sem_init(&ampi_exch.sem_mpi , 0,0)==-1) printf("Error semaphore init errno=%d\n", errno); } #pragma omp parallel num_threads(2) { if(omp_get_thread_num()==1) { window.calcDtorres(nL,nR, parsHost.wleft<nL && window.node!=0, parsHost.wleft>=nR-Ns && window.node!=window.Nprocs-1); ampi_exch.do_run=0; if(NasyncNodes>1) if(sem_post(&ampi_exch.sem_mpi)<0) printf("sem_post_mpi end error %d\n",errno); } #pragma omp master if(NasyncNodes>1) { while(ampi_exch.do_run) ampi_exch.run(); if(sem_post(&ampi_exch.sem_calc)<0) printf("sem_post_calc end error %d\n",errno); } } if(NasyncNodes>1) { if(sem_destroy(&ampi_exch.sem_mpi )<0) printf("sem_destroy error %d\n",errno); if(sem_destroy(&ampi_exch.sem_calc)<0) printf("sem_destroy error %d\n",errno); } if(parsHost.wleft==nL-Ns-1 && window.node!=0 ) { DEBUG_MPI(("Send&Recv M(%d) (node %d) wleft=%d\n", parsHost.wleft+Ns+1, window.node, parsHost.wleft)); SendMPI(&window.data [ nL *Na ], doSend[0]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+0, MPI_COMM_WORLD, &reqSm ,1); SendMPI(&window.dataPMLa[ nL *Npmly], doSend[0]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep )*2+1, MPI_COMM_WORLD, &reqSm_pml,5); RecvMPI(&window.data [ nL *Na ], doRecv[0]*Ns*Na *sizeof(DiamondRag )/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+0, MPI_COMM_WORLD, &reqRm, 3); RecvMPI(&window.dataPMLa[ nL *Npmly], doRecv[0]*Ns*Npmly*sizeof(DiamondRagPML)/sizeof(ftype), MPI_FTYPE, (window.node-1)*NasyncNodes+window.subnode, 2+(parsHost.iStep+1)*2+1, MPI_COMM_WORLD, &reqRm_pml,7); } } #endif//BLOCK_SEND #else//MPI_ON not def window.calcDtorres(); #endif//MPI_ON window.synchronize(); } parsHost.drop.dump(); #ifndef MPI_TEST if(0 && parsHost.iStep%(10*window.Nprocs)==0) parsHost.drop.sync(); #endif // printf("ix=%d\n",ix); /* int zones[] = {0, Npmlx/2, tfsfSm/dx/NDT-2, tfsfSp/dx/NDT+2, Ns-Npmlx/2, Ns}; int izon=0; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), true ); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false, ((parsHost.iStep+1)*Ntime*dt<shotpoint.tStop)?true:false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), false); izon++; Dtorres(max(ix,zones[izon]), min(Ntime,zones[izon+1]-ix), max(zones[izon]-ix,0), true );*/ #endif double calcTime=t0.gettime(); double yee_cells = 0; double overhead=0; #ifndef TEST_RATE yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*Na)*Np; overhead = window.RAMcopytime/window.GPUcalctime; printf("Step %d /node %d/ subnode %d/: Time %9.09f ms |overhead %3.03f%% | ", 
parsHost.iStep, window.node, window.subnode, calcTime, 100*overhead); // for(int idev=0;idev<NDev;idev++) printf("%3.03f%% ", 100*window.disbal[idev]/window.GPUcalctime); printf("|rate %9.09f GYee_cells/sec |isTFSF=%d \n", 1.e-9*yee_cells/(calcTime*1.e-3), (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #else yee_cells = NDT*NDT*Ntime*(unsigned long long)(Nv*(Na-2))*torreNum; printf("Step %d: Time %9.09f ms |overhead %3.03f%% |rate %9.09f %d %d %d %d (GYee cells/sec,Nx,Ny,Nz,Ntime) |isTFSF=%d \n", parsHost.iStep, calcTime, 100*overhead, 1.e-9*yee_cells/(calcTime*1.e-3), Nx,Ny,Nz,Ntime, (parsHost.iStep+1)*Ntime*dt<shotpoint.tStop ); #endif #ifdef MPI_ON double AllCalcTime; MPI_Reduce(&calcTime, &AllCalcTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if(window.node==0 && 0) printf("===(%3d)===AllCalcTime %9.09f sec |rate %9.09f GYee_cells/sec\n", parsHost.iStep, AllCalcTime*1e-3, 1.e-9*yee_cells/(AllCalcTime*1.e-3) ); #endif parsHost.iStep++; copy2dev(parsHost, pars); return 0; }
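/* ---------------------------------------------------------------------------
 * Hedged sketch (not part of the original source): the nonblocking neighbour
 * exchange that the SendMPI/RecvMPI/WaitMPI macros above boil down to in the
 * default (non-BLOCK_SEND, non-threaded) configuration: post MPI_Isend and
 * MPI_Irecv for the halo going to/coming from the adjacent rank, compute on
 * interior data while the transfers are in flight, and wait before reusing
 * the exchanged buffers. Buffer sizes, tags and the compute placeholder are
 * illustrative only. Build with mpicc/mpicxx.
 * ------------------------------------------------------------------------- */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  enum { HALO = 1024 };                  /* stands in for the Ns*Na rag data */
  float send_up[HALO], recv_up[HALO], send_down[HALO], recv_down[HALO];
  for (int i = 0; i < HALO; ++i) { send_up[i] = (float)rank; send_down[i] = (float)rank; }

  MPI_Request reqs[4];
  int nreq = 0;
  if (rank + 1 < nprocs) {               /* "P" neighbour above */
    MPI_Isend(send_up,   HALO, MPI_FLOAT, rank + 1, 0, MPI_COMM_WORLD, &reqs[nreq++]);
    MPI_Irecv(recv_up,   HALO, MPI_FLOAT, rank + 1, 1, MPI_COMM_WORLD, &reqs[nreq++]);
  }
  if (rank > 0) {                        /* "M" neighbour below */
    MPI_Isend(send_down, HALO, MPI_FLOAT, rank - 1, 1, MPI_COMM_WORLD, &reqs[nreq++]);
    MPI_Irecv(recv_down, HALO, MPI_FLOAT, rank - 1, 0, MPI_COMM_WORLD, &reqs[nreq++]);
  }

  /* ... interior computation overlaps with the transfers here ... */

  MPI_Waitall(nreq, reqs, MPI_STATUSES_IGNORE);
  printf("rank %d completed %d halo transfers\n", rank, nreq);

  MPI_Finalize();
  return 0;
}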
7239a1e10655b11f3010d8e57581f06ebb7c373e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/common/logger.hpp> #include <cuml/genetic/common.h> #include <cuml/genetic/node.h> #include <cuml/genetic/program.h> #include <gtest/gtest.h> #include <iostream> #include <raft/core/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <test_utils.h> #include <vector> namespace cuml { namespace genetic { class GeneticProgramTest : public ::testing::Test { public: GeneticProgramTest() : d_data(0, hipStream_t(0)), d_y(0, hipStream_t(0)), d_lYpred(0, hipStream_t(0)), d_lY(0, hipStream_t(0)), d_lunitW(0, hipStream_t(0)), d_lW(0, hipStream_t(0)), dx2(0, hipStream_t(0)), dy2(0, hipStream_t(0)), dw2(0, hipStream_t(0)), dyp2(0, hipStream_t(0)), stream(handle.get_stream()) { } protected: void SetUp() override { // Params hyper_params.population_size = 2; hyper_params.random_state = 123; hyper_params.num_features = 3; // X[0] * X[1] + X[2] + 0.5 h_nodes1.push_back(node(node::type::add)); h_nodes1.push_back(node(node::type::add)); h_nodes1.push_back(node(node::type::mul)); h_nodes1.push_back(node(0)); h_nodes1.push_back(node(1)); h_nodes1.push_back(node(2)); h_nodes1.push_back(node(0.5f)); // 0.5*X[1] - 0.4*X[2] h_nodes2.push_back(node(node::type::sub)); h_nodes2.push_back(node(node::type::mul)); h_nodes2.push_back(node(0.5f)); h_nodes2.push_back(node(1)); h_nodes2.push_back(node(node::type::mul)); h_nodes2.push_back(node(0.4f)); h_nodes2.push_back(node(2)); // Programs h_progs.resize(2); h_progs[0].len = h_nodes1.size(); h_progs[0].nodes = new node[h_progs[0].len]; std::copy(h_nodes1.data(), h_nodes1.data() + h_nodes1.size(), h_progs[0].nodes); h_progs[1].len = h_nodes2.size(); h_progs[1].nodes = new node[h_progs[1].len]; std::copy(h_nodes2.data(), h_nodes2.data() + h_nodes2.size(), h_progs[1].nodes); // Loss weights h_lunitW.resize(250, 1.0f); // Smaller input hw2.resize(5, 1.0f); // Device memory d_data.resize(75, stream); d_y.resize(25, stream); d_lYpred.resize(500, stream); d_lY.resize(250, stream); d_lunitW.resize(250, stream); d_lW.resize(250, stream); d_nodes1 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream); d_nodes2 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream); d_progs = (program_t)rmm::mr::get_current_device_resource()->allocate(2 * sizeof(program), stream); RAFT_CUDA_TRY(hipMemcpyAsync( d_lYpred.data(), h_lYpred.data(), 500 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(hipMemcpyAsync( d_lY.data(), h_lY.data(), 250 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(hipMemcpyAsync( d_lunitW.data(), h_lunitW.data(), 250 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(hipMemcpyAsync( d_lW.data(), h_lW.data(), 250 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(hipMemcpyAsync( d_data.data(), h_data.data(), 75 * sizeof(float), hipMemcpyHostToDevice, 
stream)); RAFT_CUDA_TRY( hipMemcpyAsync(d_y.data(), h_y.data(), 25 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( hipMemcpyAsync(d_nodes1, h_nodes1.data(), 7 * sizeof(node), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( hipMemcpyAsync(d_nodes2, h_nodes2.data(), 7 * sizeof(node), hipMemcpyHostToDevice, stream)); program tmp(h_progs[0]); delete[] tmp.nodes; tmp.nodes = d_nodes1; RAFT_CUDA_TRY( hipMemcpyAsync(&d_progs[0], &tmp, sizeof(program), hipMemcpyHostToDevice, stream)); tmp.nodes = nullptr; tmp = program(h_progs[1]); delete[] tmp.nodes; tmp.nodes = d_nodes2; RAFT_CUDA_TRY( hipMemcpyAsync(&d_progs[1], &tmp, sizeof(program), hipMemcpyHostToDevice, stream)); tmp.nodes = nullptr; // Small input dx2.resize(15, stream); dy2.resize(5, stream); dw2.resize(5, stream); dyp2.resize(10, stream); RAFT_CUDA_TRY( hipMemcpyAsync(dx2.data(), hx2.data(), 15 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( hipMemcpyAsync(dy2.data(), hy2.data(), 5 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( hipMemcpyAsync(dw2.data(), hw2.data(), 5 * sizeof(float), hipMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(hipMemcpyAsync( dyp2.data(), hyp2.data(), 10 * sizeof(float), hipMemcpyHostToDevice, stream)); } void TearDown() override { rmm::mr::get_current_device_resource()->deallocate(d_nodes1, 7 * sizeof(node), stream); rmm::mr::get_current_device_resource()->deallocate(d_nodes2, 7 * sizeof(node), stream); rmm::mr::get_current_device_resource()->deallocate(d_progs, 2 * sizeof(program), stream); } raft::handle_t handle; hipStream_t stream; const int n_cols = 3; const int n_progs = 2; const int n_samples = 25; const int n_samples2 = 5; const float tolerance = 0.025f; // assuming upto 2.5% tolerance for results(for now) // 25*3 datapoints generated using numpy // y = X[0] * X[1] + X[2] + 0.5 std::vector<float> h_data{ -0.50446586, -2.06014071, 0.88514116, -2.3015387, 0.83898341, 1.65980218, -0.87785842, 0.31563495, 0.3190391, 0.53035547, 0.30017032, -0.12289023, -1.10061918, -0.0126646, 2.10025514, 1.13376944, -0.88762896, 0.05080775, -0.34934272, 2.18557541, 0.50249434, -0.07557171, -0.52817175, -0.6871727, 0.51292982, -1.44411381, 1.46210794, 0.28558733, 0.86540763, 0.58662319, 0.2344157, -0.17242821, 0.87616892, -0.7612069, -0.26788808, 0.61720311, -0.68372786, 0.58281521, -0.67124613, 0.19091548, -0.38405435, -0.19183555, 1.6924546, -1.1425182, 1.51981682, 0.90159072, 0.48851815, -0.61175641, -0.39675353, 1.25286816, -1.39649634, -0.24937038, 0.93110208, -1.07296862, -0.20889423, -1.11731035, -1.09989127, 0.16003707, 1.74481176, -0.93576943, 0.12015895, 0.90085595, 0.04221375, -0.84520564, -0.63699565, -0.3224172, 0.74204416, -0.74715829, -0.35224985, 1.13162939, 1.14472371, -0.29809284, 1.62434536, -0.69166075, -0.75439794}; std::vector<float> h_y{-0.16799022, -2.76151846, 1.68388718, -2.56473777, 0.78327289, -0.22822666, -0.44852371, 0.9365866, 2.001957, -0.57784534, 0.80542501, 1.48487942, -0.09924385, -0.33670458, 0.26397558, -0.2578463, 1.41232295, -0.16116848, 0.54688057, 4.95330364, 2.09776794, 0.16498901, 2.44745782, 0.08097744, 0.3882355}; // Values for loss function tests (250 values each) std::vector<float> h_lYpred{ 0.06298f, 0.81894f, 0.12176f, 0.17104f, 0.12851f, 0.28721f, 0.85043f, 0.68120f, 0.57074f, 0.21796f, 0.96626f, 0.32337f, 0.21887f, 0.80867f, 0.96438f, 0.20052f, 0.28668f, 0.86931f, 0.71421f, 0.85405f, 0.13916f, 0.00316f, 0.59440f, 0.86299f, 0.67019f, 0.54309f, 0.82629f, 0.94563f, 0.01481f, 0.13665f, 0.77081f, 0.58024f, 0.02538f, 0.36610f, 
0.13948f, 0.75034f, 0.80435f, 0.27488f, 0.74165f, 0.02921f, 0.51479f, 0.66415f, 0.27380f, 0.85304f, 0.95767f, 0.22758f, 0.38602f, 0.41555f, 0.53783f, 0.48663f, 0.11103f, 0.69397f, 0.21749f, 0.71930f, 0.28976f, 0.50971f, 0.68532f, 0.97518f, 0.71299f, 0.37629f, 0.56444f, 0.42280f, 0.51921f, 0.84366f, 0.30778f, 0.39493f, 0.74007f, 0.18280f, 0.22621f, 0.63083f, 0.46085f, 0.47259f, 0.65442f, 0.25453f, 0.23058f, 0.17460f, 0.30702f, 0.22421f, 0.37237f, 0.36660f, 0.29702f, 0.65276f, 0.30222f, 0.63844f, 0.99909f, 0.55084f, 0.05066f, 0.18914f, 0.36652f, 0.36765f, 0.93901f, 0.13575f, 0.72582f, 0.20223f, 0.06375f, 0.52581f, 0.77119f, 0.12127f, 0.27800f, 0.04008f, 0.01752f, 0.00394f, 0.68973f, 0.91931f, 0.48011f, 0.48363f, 0.09770f, 0.84381f, 0.80244f, 0.42710f, 0.82164f, 0.63239f, 0.08117f, 0.46195f, 0.49832f, 0.05717f, 0.16886f, 0.22311f, 0.45326f, 0.50748f, 0.19089f, 0.78211f, 0.34272f, 0.38456f, 0.64874f, 0.18216f, 0.64757f, 0.26900f, 0.20780f, 0.87067f, 0.16903f, 0.77285f, 0.70580f, 0.54404f, 0.97395f, 0.52550f, 0.81364f, 0.30085f, 0.36754f, 0.42492f, 0.79470f, 0.31590f, 0.26322f, 0.68332f, 0.96523f, 0.31110f, 0.97029f, 0.80217f, 0.77125f, 0.36302f, 0.13444f, 0.28420f, 0.20442f, 0.89692f, 0.50515f, 0.61952f, 0.48237f, 0.35080f, 0.75606f, 0.85438f, 0.70647f, 0.91793f, 0.24037f, 0.72867f, 0.84713f, 0.39838f, 0.49553f, 0.32876f, 0.22610f, 0.86573f, 0.99232f, 0.71321f, 0.30179f, 0.01941f, 0.84838f, 0.58587f, 0.43339f, 0.29490f, 0.07191f, 0.88531f, 0.26896f, 0.36085f, 0.96043f, 0.70679f, 0.39593f, 0.37642f, 0.76078f, 0.63827f, 0.36346f, 0.12755f, 0.07074f, 0.67744f, 0.35042f, 0.30773f, 0.15577f, 0.64096f, 0.05035f, 0.32882f, 0.33640f, 0.54106f, 0.76279f, 0.00414f, 0.17373f, 0.83551f, 0.18176f, 0.91190f, 0.03559f, 0.31992f, 0.86311f, 0.04054f, 0.49714f, 0.53551f, 0.65316f, 0.15681f, 0.80268f, 0.44978f, 0.26365f, 0.37162f, 0.97630f, 0.82863f, 0.73267f, 0.93207f, 0.47129f, 0.70817f, 0.57300f, 0.34240f, 0.89749f, 0.79844f, 0.67992f, 0.72523f, 0.43319f, 0.07310f, 0.61074f, 0.93830f, 0.90822f, 0.08077f, 0.28048f, 0.04549f, 0.44870f, 0.10337f, 0.93911f, 0.13464f, 0.16080f, 0.94620f, 0.15276f, 0.56239f, 0.38684f, 0.12437f, 0.98149f, 0.80650f, 0.44040f, 0.59698f, 0.82197f, 0.91634f, 0.89667f, 0.96333f, 0.21204f, 0.47457f, 0.95737f, 0.08697f, 0.50921f, 0.58647f, 0.71985f, 0.39455f, 0.73240f, 0.04227f, 0.74879f, 0.34403f, 0.94240f, 0.45158f, 0.83860f, 0.51819f, 0.87374f, 0.70416f, 0.52987f, 0.72727f, 0.53649f, 0.74878f, 0.13247f, 0.91358f, 0.61871f, 0.50048f, 0.04681f, 0.56370f, 0.68393f, 0.51947f, 0.85044f, 0.24416f, 0.39354f, 0.33526f, 0.66574f, 0.65638f, 0.15506f, 0.84167f, 0.84663f, 0.92094f, 0.14140f, 0.69364f, 0.40575f, 0.63543f, 0.35074f, 0.68887f, 0.70662f, 0.90424f, 0.09042f, 0.57486f, 0.52239f, 0.40711f, 0.82103f, 0.08674f, 0.14005f, 0.44922f, 0.81244f, 0.99037f, 0.26577f, 0.64744f, 0.25391f, 0.47913f, 0.09676f, 0.26023f, 0.86098f, 0.24472f, 0.15364f, 0.38980f, 0.02943f, 0.59390f, 0.25683f, 0.38976f, 0.90195f, 0.27418f, 0.45255f, 0.74992f, 0.07155f, 0.95425f, 0.77560f, 0.41618f, 0.27963f, 0.32602f, 0.75690f, 0.09356f, 0.73795f, 0.59604f, 0.97534f, 0.27677f, 0.06770f, 0.59517f, 0.64286f, 0.36224f, 0.22017f, 0.83546f, 0.21461f, 0.24793f, 0.08248f, 0.16668f, 0.74429f, 0.66674f, 0.68034f, 0.34710f, 0.82358f, 0.47555f, 0.50109f, 0.09328f, 0.98566f, 0.99481f, 0.41391f, 0.86833f, 0.38645f, 0.49203f, 0.44547f, 0.55391f, 0.87598f, 0.85542f, 0.56283f, 0.61385f, 0.70564f, 0.29067f, 0.91150f, 0.64787f, 0.18255f, 0.03792f, 0.69633f, 0.29029f, 0.31412f, 0.49111f, 0.34615f, 0.43144f, 0.31616f, 0.15405f, 0.44915f, 
0.12777f, 0.09491f, 0.26003f, 0.71537f, 0.19450f, 0.91570f, 0.28420f, 0.77892f, 0.53199f, 0.66034f, 0.01978f, 0.35415f, 0.03664f, 0.42675f, 0.41304f, 0.33804f, 0.11290f, 0.89985f, 0.75959f, 0.59417f, 0.53113f, 0.38898f, 0.76259f, 0.83973f, 0.75809f, 0.65900f, 0.55141f, 0.14175f, 0.44740f, 0.95823f, 0.77612f, 0.48749f, 0.74491f, 0.57491f, 0.59119f, 0.26665f, 0.48599f, 0.85947f, 0.46245f, 0.08129f, 0.00825f, 0.29669f, 0.43499f, 0.47998f, 0.60173f, 0.26611f, 0.01223f, 0.81734f, 0.77892f, 0.79022f, 0.01394f, 0.45596f, 0.45259f, 0.32536f, 0.84229f, 0.43612f, 0.30531f, 0.10670f, 0.57758f, 0.65956f, 0.42007f, 0.32166f, 0.10552f, 0.63558f, 0.17990f, 0.50732f, 0.34599f, 0.16603f, 0.26309f, 0.04098f, 0.15997f, 0.79728f, 0.00528f, 0.35510f, 0.24344f, 0.07018f, 0.22062f, 0.92927f, 0.13373f, 0.50955f, 0.11199f, 0.75728f, 0.62117f, 0.18153f, 0.84993f, 0.04677f, 0.13013f, 0.92211f, 0.95474f, 0.88898f, 0.55561f, 0.22625f, 0.78700f, 0.73659f, 0.97613f, 0.02299f, 0.07724f, 0.78942f, 0.02193f, 0.05320f, 0.92053f, 0.35103f, 0.39305f, 0.24208f, 0.08225f, 0.78460f, 0.52144f, 0.32927f, 0.84725f, 0.36106f, 0.80349f}; std::vector<float> h_lY{ 0.60960f, 0.61090f, 0.41418f, 0.90827f, 0.76181f, 0.31777f, 0.04096f, 0.27290f, 0.56879f, 0.75461f, 0.73555f, 0.41598f, 0.59506f, 0.08768f, 0.99554f, 0.20613f, 0.13546f, 0.32044f, 0.41057f, 0.38501f, 0.27894f, 0.24027f, 0.91171f, 0.26811f, 0.55595f, 0.71153f, 0.69739f, 0.53411f, 0.78365f, 0.60914f, 0.41856f, 0.61688f, 0.28741f, 0.28708f, 0.37029f, 0.47945f, 0.40612f, 0.75762f, 0.91728f, 0.70406f, 0.26717f, 0.71175f, 0.39243f, 0.35904f, 0.38469f, 0.08664f, 0.38611f, 0.35606f, 0.52801f, 0.96986f, 0.84780f, 0.56942f, 0.41712f, 0.17005f, 0.79105f, 0.74347f, 0.83473f, 0.06303f, 0.37864f, 0.66666f, 0.78153f, 0.11061f, 0.33880f, 0.82412f, 0.47141f, 0.53043f, 0.51184f, 0.34172f, 0.57087f, 0.88349f, 0.32870f, 0.11501f, 0.35460f, 0.23630f, 0.37728f, 0.96120f, 0.19871f, 0.78119f, 0.23860f, 0.70615f, 0.46745f, 0.43392f, 0.49967f, 0.39721f, 0.53185f, 0.27827f, 0.14435f, 0.82008f, 0.43275f, 0.82113f, 0.06428f, 0.53528f, 0.21594f, 0.86172f, 0.41172f, 0.96051f, 0.54487f, 0.01971f, 0.71222f, 0.04258f, 0.36715f, 0.24844f, 0.12494f, 0.34132f, 0.87059f, 0.70216f, 0.33533f, 0.10020f, 0.79337f, 0.26059f, 0.81314f, 0.54342f, 0.79115f, 0.71730f, 0.70860f, 0.00998f, 0.64761f, 0.01206f, 0.53463f, 0.94436f, 0.19639f, 0.23296f, 0.55945f, 0.14070f, 0.57765f, 0.50908f, 0.95720f, 0.95611f, 0.12311f, 0.95382f, 0.23116f, 0.36939f, 0.66395f, 0.76282f, 0.16314f, 0.00186f, 0.77662f, 0.58799f, 0.18155f, 0.10355f, 0.45982f, 0.34359f, 0.59476f, 0.72759f, 0.77310f, 0.50736f, 0.43720f, 0.63624f, 0.84569f, 0.73073f, 0.04179f, 0.64806f, 0.19924f, 0.96082f, 0.06270f, 0.27744f, 0.59384f, 0.07317f, 0.10979f, 0.47857f, 0.60274f, 0.54937f, 0.58563f, 0.45247f, 0.84396f, 0.43945f, 0.47719f, 0.40808f, 0.81152f, 0.48558f, 0.21577f, 0.93935f, 0.08222f, 0.43114f, 0.68239f, 0.78870f, 0.24300f, 0.84829f, 0.44764f, 0.57347f, 0.78353f, 0.30614f, 0.39493f, 0.40320f, 0.72849f, 0.39406f, 0.89363f, 0.33323f, 0.38395f, 0.94783f, 0.46082f, 0.30498f, 0.17110f, 0.14083f, 0.48474f, 0.45024f, 0.92586f, 0.77450f, 0.43503f, 0.45188f, 0.80866f, 0.24937f, 0.34205f, 0.35942f, 0.79689f, 0.77224f, 0.14354f, 0.54387f, 0.50787f, 0.31753f, 0.98414f, 0.03261f, 0.89748f, 0.82350f, 0.60235f, 0.00041f, 0.99696f, 0.39894f, 0.52078f, 0.54421f, 0.33405f, 0.81143f, 0.49764f, 0.44993f, 0.37257f, 0.16238f, 0.81337f, 0.51335f, 0.96118f, 0.98901f, 0.95259f, 0.36557f, 0.24654f, 0.99554f, 0.33408f, 0.01734f, 0.85852f, 0.41286f, 0.67371f, 0.93781f, 0.04977f, 
0.17298f, 0.91502f, 0.70144f, 0.97356f, 0.12571f, 0.64375f, 0.10033f, 0.36798f, 0.90001f}; // Unitary weights std::vector<float> h_lunitW; // Non-unitary weights std::vector<float> h_lW{ 0.38674f, 0.59870f, 0.36761f, 0.59731f, 0.99057f, 0.24131f, 0.29727f, 0.94112f, 0.78962f, 0.71998f, 0.10983f, 0.33620f, 0.37988f, 0.14344f, 0.37377f, 0.06403f, 0.22877f, 0.21993f, 0.11340f, 0.28554f, 0.45453f, 0.14344f, 0.11715f, 0.23184f, 0.08622f, 0.26746f, 0.49058f, 0.06981f, 0.41885f, 0.04422f, 0.99925f, 0.71709f, 0.11910f, 0.49944f, 0.98116f, 0.66316f, 0.11646f, 0.25202f, 0.93223f, 0.81414f, 0.20446f, 0.23813f, 0.45380f, 0.83618f, 0.95958f, 0.72684f, 0.86808f, 0.96348f, 0.76092f, 0.86071f, 0.44155f, 0.85212f, 0.76185f, 0.51460f, 0.65627f, 0.38269f, 0.08251f, 0.07506f, 0.22281f, 0.05325f, 0.71190f, 0.62834f, 0.19348f, 0.44271f, 0.23677f, 0.81817f, 0.73055f, 0.48816f, 0.57524f, 0.45278f, 0.27998f, 0.35699f, 0.26875f, 0.63546f, 0.50990f, 0.21046f, 0.76892f, 0.74433f, 0.39302f, 0.55071f, 0.24554f, 0.56793f, 0.67852f, 0.43290f, 0.97266f, 0.52475f, 0.88402f, 0.79439f, 0.01496f, 0.46426f, 0.15537f, 0.35364f, 0.42962f, 0.47999f, 0.06357f, 0.78531f, 0.62165f, 0.45226f, 0.84973f, 0.63747f, 0.00593f, 0.31520f, 0.13150f, 0.47776f, 0.56420f, 0.21679f, 0.32107f, 0.62491f, 0.33747f, 0.86599f, 0.82573f, 0.26970f, 0.50087f, 0.86947f, 0.47433f, 0.91848f, 0.19534f, 0.45760f, 0.38407f, 0.18953f, 0.30000f, 0.37964f, 0.42509f, 0.55408f, 0.74500f, 0.44484f, 0.67679f, 0.12214f, 0.68380f, 0.74917f, 0.87429f, 0.04355f, 0.98426f, 0.88845f, 0.88318f, 0.64393f, 0.90849f, 0.87948f, 0.22915f, 0.86887f, 0.58676f, 0.51575f, 0.56549f, 0.41412f, 0.06593f, 0.40484f, 0.72931f, 0.02289f, 0.96391f, 0.61075f, 0.91701f, 0.29698f, 0.37095f, 0.42087f, 0.73251f, 0.93271f, 0.32687f, 0.48981f, 0.01081f, 0.11985f, 0.46962f, 0.02569f, 0.83989f, 0.21767f, 0.82370f, 0.35174f, 0.94939f, 0.46032f, 0.81569f, 0.66635f, 0.07019f, 0.68926f, 0.65628f, 0.19914f, 0.17936f, 0.64540f, 0.09031f, 0.05875f, 0.88790f, 0.83687f, 0.46605f, 0.08537f, 0.49514f, 0.44504f, 0.67687f, 0.28943f, 0.74668f, 0.43207f, 0.70990f, 0.62513f, 0.56137f, 0.94399f, 0.75806f, 0.41840f, 0.38428f, 0.30754f, 0.62633f, 0.23173f, 0.40750f, 0.49968f, 0.05536f, 0.11405f, 0.34185f, 0.36367f, 0.06341f, 0.66834f, 0.42899f, 0.08343f, 0.72266f, 0.33155f, 0.74943f, 0.15387f, 0.02475f, 0.35741f, 0.15806f, 0.35406f, 0.18226f, 0.31042f, 0.36047f, 0.62366f, 0.30036f, 0.66625f, 0.99695f, 0.99472f, 0.06743f, 0.56804f, 0.28185f, 0.77387f, 0.58763f, 0.77824f, 0.03720f, 0.99490f, 0.73720f, 0.93635f, 0.85669f, 0.91634f, 0.26065f, 0.97469f, 0.03867f, 0.52306f, 0.99167f, 0.90332f, 0.88546f, 0.07109f, 0.94168f, 0.10211f, 0.95949f, 0.86314f, 0.59917f, 0.41948f}; // Setup smaller input std::vector<float> hx2 = {0.06298, 0.96626, 0.13916, 0.77081, 0.51479, 0.81894, 0.32337, 0.00316, 0.58024, 0.66415, 0.12176, 0.21887, 0.59440, 0.02538, 0.27380}; std::vector<float> hy2 = {0.11103, 0.69397, 0.21749, 0.71930, 0.28976}; std::vector<float> hyp2 = { 0.67334, 1.03133, 1.09484, 0.97263, 1.1157, 0.36077, 0.07413, -0.23618, 0.27997, 0.22255}; std::vector<float> hw2; // Nodes and programs std::vector<node> h_nodes1; std::vector<node> h_nodes2; std::vector<program> h_progs; // Device ptrs node* d_nodes1; node* d_nodes2; program_t d_progs; rmm::device_uvector<float> d_data; rmm::device_uvector<float> d_y; rmm::device_uvector<float> d_lYpred; rmm::device_uvector<float> d_lY; rmm::device_uvector<float> d_lunitW; rmm::device_uvector<float> d_lW; rmm::device_uvector<float> dx2; rmm::device_uvector<float> dy2; 
rmm::device_uvector<float> dw2; rmm::device_uvector<float> dyp2; param hyper_params; }; TEST_F(GeneticProgramTest, PearsonCoeff) { raft::CompareApproxAbs<float> compApprox(tolerance); float h_expected_score[2] = {0.09528403f, 0.08269963f}; float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::pearson; // Unitary weights compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.3247632f; h_expected_score[1] = 0.0796348f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.14329584f; h_expected_score[1] = 0.09064283f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, SpearmanCoeff) { raft::CompareApproxAbs<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::spearman; // Unitary weights float h_expected_score[2] = {0.09268333f, 0.07529861f}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.10000f; h_expected_score[1] = 0.10000f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.14072408f; h_expected_score[1] = 0.08157397f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, MeanSquareLoss) { 
raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::mse; // Unitary weights float h_expected_score[2] = {0.14297023, 0.14242104}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.3892163f; h_expected_score[1] = 0.1699830f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.13842479f; h_expected_score[1] = 0.14538825f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, MeanAbsoluteLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::mae; // Unitary weights - big float h_expected_score[2] = {0.30614017, 0.31275677}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.571255f; h_expected_score[1] = 0.365957f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights -big h_expected_score[0] = 0.29643119f; h_expected_score[1] = 0.31756123f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, RMSLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); 
hyper_params.metric = metric_t::rmse; // Unitary weights float h_expected_score[2] = {0.37811404, 0.37738713}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.6238720f; h_expected_score[1] = 0.4122899f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.37205482f; h_expected_score[1] = 0.38129811f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, LogLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::logloss; // Unitary weights float h_expected_score[2] = {0.72276, 0.724011}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.715887f; h_expected_score[1] = 0.721293f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( hipMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, ProgramExecution) { raft::CompareApprox<float> compApprox(tolerance); // Enable debug logging ML::Logger::get().setLevel(CUML_LEVEL_INFO); // Allocate memory std::vector<float> h_ypred(n_progs * n_samples, 0.0f); rmm::device_uvector<float> d_ypred(n_progs * n_samples, stream); // Execute programs execute(handle, d_progs, n_samples, n_progs, d_data.data(), d_ypred.data()); RAFT_CUDA_TRY(hipMemcpyAsync(h_ypred.data(), d_ypred.data(), n_progs * n_samples * sizeof(float), hipMemcpyDeviceToHost, stream)); handle.sync_stream(stream); // Check results for (int i = 0; i < n_samples; ++i) { ASSERT_TRUE(compApprox(h_ypred[i], h_y[i])); } for (int i = 0; i < n_samples; ++i) { ASSERT_TRUE(compApprox(h_ypred[n_samples + i], 0.5 * h_data[n_samples + i] - 0.4 * h_data[2 * n_samples + i])); } } 
TEST_F(GeneticProgramTest, ProgramFitnessScore) { raft::CompareApprox<float> compApprox(tolerance); std::vector<metric_t> all_metrics = { metric_t::mae, metric_t::mse, metric_t::rmse, metric_t::pearson, metric_t::spearman}; std::vector<float> hexpscores = { 0.57126, 0.36596, 0.38922, 0.16998, 0.62387, 0.41229, 0.32476, 0.07963, 0.10000, 0.10000}; std::vector<float> hactualscores(10); rmm::device_uvector<float> dactualscores(10, stream); // Start execution for all metrics for (int i = 0; i < 5; ++i) { hyper_params.metric = all_metrics[i]; find_batched_fitness(handle, n_progs, d_progs, dactualscores.data() + 2 * i, hyper_params, n_samples2, dx2.data(), dy2.data(), dw2.data()); handle.sync_stream(stream); } RAFT_CUDA_TRY(hipMemcpyAsync(hactualscores.data(), dactualscores.data(), 10 * sizeof(float), hipMemcpyDeviceToHost, stream)); std::copy( hactualscores.begin(), hactualscores.end(), std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 10; ++i) { ASSERT_TRUE(compApprox(std::abs(hactualscores[i]), hexpscores[i])); } } } // namespace genetic } // namespace cuml
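// --------------------------------------------------------------------------
// Illustration (not part of the original file): the node arrays built in the
// fixture above (h_nodes1, h_nodes2) are laid out in prefix order, which is why
// {add, add, mul, X0, X1, X2, 0.5f} encodes X[0]*X[1] + X[2] + 0.5 and
// {sub, mul, 0.5f, X1, mul, 0.4f, X2} encodes 0.5*X[1] - 0.4*X[2]. The
// stand-alone sketch below evaluates such a prefix program on the host; the
// Op/Tok names are made up for this example and are NOT the cuML API
// (cuml::genetic::execute() is the real implementation under test).
#include <cstddef>
#include <cstdio>
#include <vector>

enum class Op { add, sub, mul, var, constant };
struct Tok {
  Op op;
  int var     = 0;     // feature index when op == Op::var
  float value = 0.0f;  // literal when op == Op::constant
};

// Consumes tokens starting at prog[i] and returns the value of that subtree.
static float eval_prefix(const std::vector<Tok>& prog, const std::vector<float>& x, std::size_t& i)
{
  const Tok t = prog[i++];
  if (t.op == Op::var) return x[t.var];
  if (t.op == Op::constant) return t.value;
  const float lhs = eval_prefix(prog, x, i);
  const float rhs = eval_prefix(prog, x, i);
  if (t.op == Op::add) return lhs + rhs;
  if (t.op == Op::sub) return lhs - rhs;
  return lhs * rhs;  // Op::mul
}

int main()
{
  // Same shape as h_nodes1: add(add(mul(X0, X1), X2), 0.5)
  std::vector<Tok> p1 = {
    {Op::add}, {Op::add}, {Op::mul}, {Op::var, 0}, {Op::var, 1}, {Op::var, 2}, {Op::constant, 0, 0.5f}};
  std::vector<float> x = {2.0f, 3.0f, 4.0f};
  std::size_t i = 0;
  std::printf("%f\n", eval_prefix(p1, x, i));  // 2*3 + 4 + 0.5 = 10.5
  return 0;
}
// --------------------------------------------------------------------------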
7239a1e10655b11f3010d8e57581f06ebb7c373e.cu
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/common/logger.hpp> #include <cuml/genetic/common.h> #include <cuml/genetic/node.h> #include <cuml/genetic/program.h> #include <gtest/gtest.h> #include <iostream> #include <raft/core/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <test_utils.h> #include <vector> namespace cuml { namespace genetic { class GeneticProgramTest : public ::testing::Test { public: GeneticProgramTest() : d_data(0, cudaStream_t(0)), d_y(0, cudaStream_t(0)), d_lYpred(0, cudaStream_t(0)), d_lY(0, cudaStream_t(0)), d_lunitW(0, cudaStream_t(0)), d_lW(0, cudaStream_t(0)), dx2(0, cudaStream_t(0)), dy2(0, cudaStream_t(0)), dw2(0, cudaStream_t(0)), dyp2(0, cudaStream_t(0)), stream(handle.get_stream()) { } protected: void SetUp() override { // Params hyper_params.population_size = 2; hyper_params.random_state = 123; hyper_params.num_features = 3; // X[0] * X[1] + X[2] + 0.5 h_nodes1.push_back(node(node::type::add)); h_nodes1.push_back(node(node::type::add)); h_nodes1.push_back(node(node::type::mul)); h_nodes1.push_back(node(0)); h_nodes1.push_back(node(1)); h_nodes1.push_back(node(2)); h_nodes1.push_back(node(0.5f)); // 0.5*X[1] - 0.4*X[2] h_nodes2.push_back(node(node::type::sub)); h_nodes2.push_back(node(node::type::mul)); h_nodes2.push_back(node(0.5f)); h_nodes2.push_back(node(1)); h_nodes2.push_back(node(node::type::mul)); h_nodes2.push_back(node(0.4f)); h_nodes2.push_back(node(2)); // Programs h_progs.resize(2); h_progs[0].len = h_nodes1.size(); h_progs[0].nodes = new node[h_progs[0].len]; std::copy(h_nodes1.data(), h_nodes1.data() + h_nodes1.size(), h_progs[0].nodes); h_progs[1].len = h_nodes2.size(); h_progs[1].nodes = new node[h_progs[1].len]; std::copy(h_nodes2.data(), h_nodes2.data() + h_nodes2.size(), h_progs[1].nodes); // Loss weights h_lunitW.resize(250, 1.0f); // Smaller input hw2.resize(5, 1.0f); // Device memory d_data.resize(75, stream); d_y.resize(25, stream); d_lYpred.resize(500, stream); d_lY.resize(250, stream); d_lunitW.resize(250, stream); d_lW.resize(250, stream); d_nodes1 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream); d_nodes2 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream); d_progs = (program_t)rmm::mr::get_current_device_resource()->allocate(2 * sizeof(program), stream); RAFT_CUDA_TRY(cudaMemcpyAsync( d_lYpred.data(), h_lYpred.data(), 500 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(cudaMemcpyAsync( d_lY.data(), h_lY.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(cudaMemcpyAsync( d_lunitW.data(), h_lunitW.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(cudaMemcpyAsync( d_lW.data(), h_lW.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(cudaMemcpyAsync( d_data.data(), h_data.data(), 75 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( 
cudaMemcpyAsync(d_y.data(), h_y.data(), 25 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( cudaMemcpyAsync(d_nodes1, h_nodes1.data(), 7 * sizeof(node), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( cudaMemcpyAsync(d_nodes2, h_nodes2.data(), 7 * sizeof(node), cudaMemcpyHostToDevice, stream)); program tmp(h_progs[0]); delete[] tmp.nodes; tmp.nodes = d_nodes1; RAFT_CUDA_TRY( cudaMemcpyAsync(&d_progs[0], &tmp, sizeof(program), cudaMemcpyHostToDevice, stream)); tmp.nodes = nullptr; tmp = program(h_progs[1]); delete[] tmp.nodes; tmp.nodes = d_nodes2; RAFT_CUDA_TRY( cudaMemcpyAsync(&d_progs[1], &tmp, sizeof(program), cudaMemcpyHostToDevice, stream)); tmp.nodes = nullptr; // Small input dx2.resize(15, stream); dy2.resize(5, stream); dw2.resize(5, stream); dyp2.resize(10, stream); RAFT_CUDA_TRY( cudaMemcpyAsync(dx2.data(), hx2.data(), 15 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( cudaMemcpyAsync(dy2.data(), hy2.data(), 5 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY( cudaMemcpyAsync(dw2.data(), hw2.data(), 5 * sizeof(float), cudaMemcpyHostToDevice, stream)); RAFT_CUDA_TRY(cudaMemcpyAsync( dyp2.data(), hyp2.data(), 10 * sizeof(float), cudaMemcpyHostToDevice, stream)); } void TearDown() override { rmm::mr::get_current_device_resource()->deallocate(d_nodes1, 7 * sizeof(node), stream); rmm::mr::get_current_device_resource()->deallocate(d_nodes2, 7 * sizeof(node), stream); rmm::mr::get_current_device_resource()->deallocate(d_progs, 2 * sizeof(program), stream); } raft::handle_t handle; cudaStream_t stream; const int n_cols = 3; const int n_progs = 2; const int n_samples = 25; const int n_samples2 = 5; const float tolerance = 0.025f; // assuming upto 2.5% tolerance for results(for now) // 25*3 datapoints generated using numpy // y = X[0] * X[1] + X[2] + 0.5 std::vector<float> h_data{ -0.50446586, -2.06014071, 0.88514116, -2.3015387, 0.83898341, 1.65980218, -0.87785842, 0.31563495, 0.3190391, 0.53035547, 0.30017032, -0.12289023, -1.10061918, -0.0126646, 2.10025514, 1.13376944, -0.88762896, 0.05080775, -0.34934272, 2.18557541, 0.50249434, -0.07557171, -0.52817175, -0.6871727, 0.51292982, -1.44411381, 1.46210794, 0.28558733, 0.86540763, 0.58662319, 0.2344157, -0.17242821, 0.87616892, -0.7612069, -0.26788808, 0.61720311, -0.68372786, 0.58281521, -0.67124613, 0.19091548, -0.38405435, -0.19183555, 1.6924546, -1.1425182, 1.51981682, 0.90159072, 0.48851815, -0.61175641, -0.39675353, 1.25286816, -1.39649634, -0.24937038, 0.93110208, -1.07296862, -0.20889423, -1.11731035, -1.09989127, 0.16003707, 1.74481176, -0.93576943, 0.12015895, 0.90085595, 0.04221375, -0.84520564, -0.63699565, -0.3224172, 0.74204416, -0.74715829, -0.35224985, 1.13162939, 1.14472371, -0.29809284, 1.62434536, -0.69166075, -0.75439794}; std::vector<float> h_y{-0.16799022, -2.76151846, 1.68388718, -2.56473777, 0.78327289, -0.22822666, -0.44852371, 0.9365866, 2.001957, -0.57784534, 0.80542501, 1.48487942, -0.09924385, -0.33670458, 0.26397558, -0.2578463, 1.41232295, -0.16116848, 0.54688057, 4.95330364, 2.09776794, 0.16498901, 2.44745782, 0.08097744, 0.3882355}; // Values for loss function tests (250 values each) std::vector<float> h_lYpred{ 0.06298f, 0.81894f, 0.12176f, 0.17104f, 0.12851f, 0.28721f, 0.85043f, 0.68120f, 0.57074f, 0.21796f, 0.96626f, 0.32337f, 0.21887f, 0.80867f, 0.96438f, 0.20052f, 0.28668f, 0.86931f, 0.71421f, 0.85405f, 0.13916f, 0.00316f, 0.59440f, 0.86299f, 0.67019f, 0.54309f, 0.82629f, 0.94563f, 0.01481f, 0.13665f, 0.77081f, 0.58024f, 0.02538f, 0.36610f, 0.13948f, 
0.75034f, 0.80435f, 0.27488f, 0.74165f, 0.02921f, 0.51479f, 0.66415f, 0.27380f, 0.85304f, 0.95767f, 0.22758f, 0.38602f, 0.41555f, 0.53783f, 0.48663f, 0.11103f, 0.69397f, 0.21749f, 0.71930f, 0.28976f, 0.50971f, 0.68532f, 0.97518f, 0.71299f, 0.37629f, 0.56444f, 0.42280f, 0.51921f, 0.84366f, 0.30778f, 0.39493f, 0.74007f, 0.18280f, 0.22621f, 0.63083f, 0.46085f, 0.47259f, 0.65442f, 0.25453f, 0.23058f, 0.17460f, 0.30702f, 0.22421f, 0.37237f, 0.36660f, 0.29702f, 0.65276f, 0.30222f, 0.63844f, 0.99909f, 0.55084f, 0.05066f, 0.18914f, 0.36652f, 0.36765f, 0.93901f, 0.13575f, 0.72582f, 0.20223f, 0.06375f, 0.52581f, 0.77119f, 0.12127f, 0.27800f, 0.04008f, 0.01752f, 0.00394f, 0.68973f, 0.91931f, 0.48011f, 0.48363f, 0.09770f, 0.84381f, 0.80244f, 0.42710f, 0.82164f, 0.63239f, 0.08117f, 0.46195f, 0.49832f, 0.05717f, 0.16886f, 0.22311f, 0.45326f, 0.50748f, 0.19089f, 0.78211f, 0.34272f, 0.38456f, 0.64874f, 0.18216f, 0.64757f, 0.26900f, 0.20780f, 0.87067f, 0.16903f, 0.77285f, 0.70580f, 0.54404f, 0.97395f, 0.52550f, 0.81364f, 0.30085f, 0.36754f, 0.42492f, 0.79470f, 0.31590f, 0.26322f, 0.68332f, 0.96523f, 0.31110f, 0.97029f, 0.80217f, 0.77125f, 0.36302f, 0.13444f, 0.28420f, 0.20442f, 0.89692f, 0.50515f, 0.61952f, 0.48237f, 0.35080f, 0.75606f, 0.85438f, 0.70647f, 0.91793f, 0.24037f, 0.72867f, 0.84713f, 0.39838f, 0.49553f, 0.32876f, 0.22610f, 0.86573f, 0.99232f, 0.71321f, 0.30179f, 0.01941f, 0.84838f, 0.58587f, 0.43339f, 0.29490f, 0.07191f, 0.88531f, 0.26896f, 0.36085f, 0.96043f, 0.70679f, 0.39593f, 0.37642f, 0.76078f, 0.63827f, 0.36346f, 0.12755f, 0.07074f, 0.67744f, 0.35042f, 0.30773f, 0.15577f, 0.64096f, 0.05035f, 0.32882f, 0.33640f, 0.54106f, 0.76279f, 0.00414f, 0.17373f, 0.83551f, 0.18176f, 0.91190f, 0.03559f, 0.31992f, 0.86311f, 0.04054f, 0.49714f, 0.53551f, 0.65316f, 0.15681f, 0.80268f, 0.44978f, 0.26365f, 0.37162f, 0.97630f, 0.82863f, 0.73267f, 0.93207f, 0.47129f, 0.70817f, 0.57300f, 0.34240f, 0.89749f, 0.79844f, 0.67992f, 0.72523f, 0.43319f, 0.07310f, 0.61074f, 0.93830f, 0.90822f, 0.08077f, 0.28048f, 0.04549f, 0.44870f, 0.10337f, 0.93911f, 0.13464f, 0.16080f, 0.94620f, 0.15276f, 0.56239f, 0.38684f, 0.12437f, 0.98149f, 0.80650f, 0.44040f, 0.59698f, 0.82197f, 0.91634f, 0.89667f, 0.96333f, 0.21204f, 0.47457f, 0.95737f, 0.08697f, 0.50921f, 0.58647f, 0.71985f, 0.39455f, 0.73240f, 0.04227f, 0.74879f, 0.34403f, 0.94240f, 0.45158f, 0.83860f, 0.51819f, 0.87374f, 0.70416f, 0.52987f, 0.72727f, 0.53649f, 0.74878f, 0.13247f, 0.91358f, 0.61871f, 0.50048f, 0.04681f, 0.56370f, 0.68393f, 0.51947f, 0.85044f, 0.24416f, 0.39354f, 0.33526f, 0.66574f, 0.65638f, 0.15506f, 0.84167f, 0.84663f, 0.92094f, 0.14140f, 0.69364f, 0.40575f, 0.63543f, 0.35074f, 0.68887f, 0.70662f, 0.90424f, 0.09042f, 0.57486f, 0.52239f, 0.40711f, 0.82103f, 0.08674f, 0.14005f, 0.44922f, 0.81244f, 0.99037f, 0.26577f, 0.64744f, 0.25391f, 0.47913f, 0.09676f, 0.26023f, 0.86098f, 0.24472f, 0.15364f, 0.38980f, 0.02943f, 0.59390f, 0.25683f, 0.38976f, 0.90195f, 0.27418f, 0.45255f, 0.74992f, 0.07155f, 0.95425f, 0.77560f, 0.41618f, 0.27963f, 0.32602f, 0.75690f, 0.09356f, 0.73795f, 0.59604f, 0.97534f, 0.27677f, 0.06770f, 0.59517f, 0.64286f, 0.36224f, 0.22017f, 0.83546f, 0.21461f, 0.24793f, 0.08248f, 0.16668f, 0.74429f, 0.66674f, 0.68034f, 0.34710f, 0.82358f, 0.47555f, 0.50109f, 0.09328f, 0.98566f, 0.99481f, 0.41391f, 0.86833f, 0.38645f, 0.49203f, 0.44547f, 0.55391f, 0.87598f, 0.85542f, 0.56283f, 0.61385f, 0.70564f, 0.29067f, 0.91150f, 0.64787f, 0.18255f, 0.03792f, 0.69633f, 0.29029f, 0.31412f, 0.49111f, 0.34615f, 0.43144f, 0.31616f, 0.15405f, 0.44915f, 0.12777f, 
0.09491f, 0.26003f, 0.71537f, 0.19450f, 0.91570f, 0.28420f, 0.77892f, 0.53199f, 0.66034f, 0.01978f, 0.35415f, 0.03664f, 0.42675f, 0.41304f, 0.33804f, 0.11290f, 0.89985f, 0.75959f, 0.59417f, 0.53113f, 0.38898f, 0.76259f, 0.83973f, 0.75809f, 0.65900f, 0.55141f, 0.14175f, 0.44740f, 0.95823f, 0.77612f, 0.48749f, 0.74491f, 0.57491f, 0.59119f, 0.26665f, 0.48599f, 0.85947f, 0.46245f, 0.08129f, 0.00825f, 0.29669f, 0.43499f, 0.47998f, 0.60173f, 0.26611f, 0.01223f, 0.81734f, 0.77892f, 0.79022f, 0.01394f, 0.45596f, 0.45259f, 0.32536f, 0.84229f, 0.43612f, 0.30531f, 0.10670f, 0.57758f, 0.65956f, 0.42007f, 0.32166f, 0.10552f, 0.63558f, 0.17990f, 0.50732f, 0.34599f, 0.16603f, 0.26309f, 0.04098f, 0.15997f, 0.79728f, 0.00528f, 0.35510f, 0.24344f, 0.07018f, 0.22062f, 0.92927f, 0.13373f, 0.50955f, 0.11199f, 0.75728f, 0.62117f, 0.18153f, 0.84993f, 0.04677f, 0.13013f, 0.92211f, 0.95474f, 0.88898f, 0.55561f, 0.22625f, 0.78700f, 0.73659f, 0.97613f, 0.02299f, 0.07724f, 0.78942f, 0.02193f, 0.05320f, 0.92053f, 0.35103f, 0.39305f, 0.24208f, 0.08225f, 0.78460f, 0.52144f, 0.32927f, 0.84725f, 0.36106f, 0.80349f}; std::vector<float> h_lY{ 0.60960f, 0.61090f, 0.41418f, 0.90827f, 0.76181f, 0.31777f, 0.04096f, 0.27290f, 0.56879f, 0.75461f, 0.73555f, 0.41598f, 0.59506f, 0.08768f, 0.99554f, 0.20613f, 0.13546f, 0.32044f, 0.41057f, 0.38501f, 0.27894f, 0.24027f, 0.91171f, 0.26811f, 0.55595f, 0.71153f, 0.69739f, 0.53411f, 0.78365f, 0.60914f, 0.41856f, 0.61688f, 0.28741f, 0.28708f, 0.37029f, 0.47945f, 0.40612f, 0.75762f, 0.91728f, 0.70406f, 0.26717f, 0.71175f, 0.39243f, 0.35904f, 0.38469f, 0.08664f, 0.38611f, 0.35606f, 0.52801f, 0.96986f, 0.84780f, 0.56942f, 0.41712f, 0.17005f, 0.79105f, 0.74347f, 0.83473f, 0.06303f, 0.37864f, 0.66666f, 0.78153f, 0.11061f, 0.33880f, 0.82412f, 0.47141f, 0.53043f, 0.51184f, 0.34172f, 0.57087f, 0.88349f, 0.32870f, 0.11501f, 0.35460f, 0.23630f, 0.37728f, 0.96120f, 0.19871f, 0.78119f, 0.23860f, 0.70615f, 0.46745f, 0.43392f, 0.49967f, 0.39721f, 0.53185f, 0.27827f, 0.14435f, 0.82008f, 0.43275f, 0.82113f, 0.06428f, 0.53528f, 0.21594f, 0.86172f, 0.41172f, 0.96051f, 0.54487f, 0.01971f, 0.71222f, 0.04258f, 0.36715f, 0.24844f, 0.12494f, 0.34132f, 0.87059f, 0.70216f, 0.33533f, 0.10020f, 0.79337f, 0.26059f, 0.81314f, 0.54342f, 0.79115f, 0.71730f, 0.70860f, 0.00998f, 0.64761f, 0.01206f, 0.53463f, 0.94436f, 0.19639f, 0.23296f, 0.55945f, 0.14070f, 0.57765f, 0.50908f, 0.95720f, 0.95611f, 0.12311f, 0.95382f, 0.23116f, 0.36939f, 0.66395f, 0.76282f, 0.16314f, 0.00186f, 0.77662f, 0.58799f, 0.18155f, 0.10355f, 0.45982f, 0.34359f, 0.59476f, 0.72759f, 0.77310f, 0.50736f, 0.43720f, 0.63624f, 0.84569f, 0.73073f, 0.04179f, 0.64806f, 0.19924f, 0.96082f, 0.06270f, 0.27744f, 0.59384f, 0.07317f, 0.10979f, 0.47857f, 0.60274f, 0.54937f, 0.58563f, 0.45247f, 0.84396f, 0.43945f, 0.47719f, 0.40808f, 0.81152f, 0.48558f, 0.21577f, 0.93935f, 0.08222f, 0.43114f, 0.68239f, 0.78870f, 0.24300f, 0.84829f, 0.44764f, 0.57347f, 0.78353f, 0.30614f, 0.39493f, 0.40320f, 0.72849f, 0.39406f, 0.89363f, 0.33323f, 0.38395f, 0.94783f, 0.46082f, 0.30498f, 0.17110f, 0.14083f, 0.48474f, 0.45024f, 0.92586f, 0.77450f, 0.43503f, 0.45188f, 0.80866f, 0.24937f, 0.34205f, 0.35942f, 0.79689f, 0.77224f, 0.14354f, 0.54387f, 0.50787f, 0.31753f, 0.98414f, 0.03261f, 0.89748f, 0.82350f, 0.60235f, 0.00041f, 0.99696f, 0.39894f, 0.52078f, 0.54421f, 0.33405f, 0.81143f, 0.49764f, 0.44993f, 0.37257f, 0.16238f, 0.81337f, 0.51335f, 0.96118f, 0.98901f, 0.95259f, 0.36557f, 0.24654f, 0.99554f, 0.33408f, 0.01734f, 0.85852f, 0.41286f, 0.67371f, 0.93781f, 0.04977f, 0.17298f, 
0.91502f, 0.70144f, 0.97356f, 0.12571f, 0.64375f, 0.10033f, 0.36798f, 0.90001f}; // Unitary weights std::vector<float> h_lunitW; // Non-unitary weights std::vector<float> h_lW{ 0.38674f, 0.59870f, 0.36761f, 0.59731f, 0.99057f, 0.24131f, 0.29727f, 0.94112f, 0.78962f, 0.71998f, 0.10983f, 0.33620f, 0.37988f, 0.14344f, 0.37377f, 0.06403f, 0.22877f, 0.21993f, 0.11340f, 0.28554f, 0.45453f, 0.14344f, 0.11715f, 0.23184f, 0.08622f, 0.26746f, 0.49058f, 0.06981f, 0.41885f, 0.04422f, 0.99925f, 0.71709f, 0.11910f, 0.49944f, 0.98116f, 0.66316f, 0.11646f, 0.25202f, 0.93223f, 0.81414f, 0.20446f, 0.23813f, 0.45380f, 0.83618f, 0.95958f, 0.72684f, 0.86808f, 0.96348f, 0.76092f, 0.86071f, 0.44155f, 0.85212f, 0.76185f, 0.51460f, 0.65627f, 0.38269f, 0.08251f, 0.07506f, 0.22281f, 0.05325f, 0.71190f, 0.62834f, 0.19348f, 0.44271f, 0.23677f, 0.81817f, 0.73055f, 0.48816f, 0.57524f, 0.45278f, 0.27998f, 0.35699f, 0.26875f, 0.63546f, 0.50990f, 0.21046f, 0.76892f, 0.74433f, 0.39302f, 0.55071f, 0.24554f, 0.56793f, 0.67852f, 0.43290f, 0.97266f, 0.52475f, 0.88402f, 0.79439f, 0.01496f, 0.46426f, 0.15537f, 0.35364f, 0.42962f, 0.47999f, 0.06357f, 0.78531f, 0.62165f, 0.45226f, 0.84973f, 0.63747f, 0.00593f, 0.31520f, 0.13150f, 0.47776f, 0.56420f, 0.21679f, 0.32107f, 0.62491f, 0.33747f, 0.86599f, 0.82573f, 0.26970f, 0.50087f, 0.86947f, 0.47433f, 0.91848f, 0.19534f, 0.45760f, 0.38407f, 0.18953f, 0.30000f, 0.37964f, 0.42509f, 0.55408f, 0.74500f, 0.44484f, 0.67679f, 0.12214f, 0.68380f, 0.74917f, 0.87429f, 0.04355f, 0.98426f, 0.88845f, 0.88318f, 0.64393f, 0.90849f, 0.87948f, 0.22915f, 0.86887f, 0.58676f, 0.51575f, 0.56549f, 0.41412f, 0.06593f, 0.40484f, 0.72931f, 0.02289f, 0.96391f, 0.61075f, 0.91701f, 0.29698f, 0.37095f, 0.42087f, 0.73251f, 0.93271f, 0.32687f, 0.48981f, 0.01081f, 0.11985f, 0.46962f, 0.02569f, 0.83989f, 0.21767f, 0.82370f, 0.35174f, 0.94939f, 0.46032f, 0.81569f, 0.66635f, 0.07019f, 0.68926f, 0.65628f, 0.19914f, 0.17936f, 0.64540f, 0.09031f, 0.05875f, 0.88790f, 0.83687f, 0.46605f, 0.08537f, 0.49514f, 0.44504f, 0.67687f, 0.28943f, 0.74668f, 0.43207f, 0.70990f, 0.62513f, 0.56137f, 0.94399f, 0.75806f, 0.41840f, 0.38428f, 0.30754f, 0.62633f, 0.23173f, 0.40750f, 0.49968f, 0.05536f, 0.11405f, 0.34185f, 0.36367f, 0.06341f, 0.66834f, 0.42899f, 0.08343f, 0.72266f, 0.33155f, 0.74943f, 0.15387f, 0.02475f, 0.35741f, 0.15806f, 0.35406f, 0.18226f, 0.31042f, 0.36047f, 0.62366f, 0.30036f, 0.66625f, 0.99695f, 0.99472f, 0.06743f, 0.56804f, 0.28185f, 0.77387f, 0.58763f, 0.77824f, 0.03720f, 0.99490f, 0.73720f, 0.93635f, 0.85669f, 0.91634f, 0.26065f, 0.97469f, 0.03867f, 0.52306f, 0.99167f, 0.90332f, 0.88546f, 0.07109f, 0.94168f, 0.10211f, 0.95949f, 0.86314f, 0.59917f, 0.41948f}; // Setup smaller input std::vector<float> hx2 = {0.06298, 0.96626, 0.13916, 0.77081, 0.51479, 0.81894, 0.32337, 0.00316, 0.58024, 0.66415, 0.12176, 0.21887, 0.59440, 0.02538, 0.27380}; std::vector<float> hy2 = {0.11103, 0.69397, 0.21749, 0.71930, 0.28976}; std::vector<float> hyp2 = { 0.67334, 1.03133, 1.09484, 0.97263, 1.1157, 0.36077, 0.07413, -0.23618, 0.27997, 0.22255}; std::vector<float> hw2; // Nodes and programs std::vector<node> h_nodes1; std::vector<node> h_nodes2; std::vector<program> h_progs; // Device ptrs node* d_nodes1; node* d_nodes2; program_t d_progs; rmm::device_uvector<float> d_data; rmm::device_uvector<float> d_y; rmm::device_uvector<float> d_lYpred; rmm::device_uvector<float> d_lY; rmm::device_uvector<float> d_lunitW; rmm::device_uvector<float> d_lW; rmm::device_uvector<float> dx2; rmm::device_uvector<float> dy2; rmm::device_uvector<float> dw2; 
rmm::device_uvector<float> dyp2; param hyper_params; }; TEST_F(GeneticProgramTest, PearsonCoeff) { raft::CompareApproxAbs<float> compApprox(tolerance); float h_expected_score[2] = {0.09528403f, 0.08269963f}; float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::pearson; // Unitary weights compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.3247632f; h_expected_score[1] = 0.0796348f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.14329584f; h_expected_score[1] = 0.09064283f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, SpearmanCoeff) { raft::CompareApproxAbs<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::spearman; // Unitary weights float h_expected_score[2] = {0.09268333f, 0.07529861f}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.10000f; h_expected_score[1] = 0.10000f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.14072408f; h_expected_score[1] = 0.08157397f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, MeanSquareLoss) { 
raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::mse; // Unitary weights float h_expected_score[2] = {0.14297023, 0.14242104}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.3892163f; h_expected_score[1] = 0.1699830f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.13842479f; h_expected_score[1] = 0.14538825f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, MeanAbsoluteLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::mae; // Unitary weights - big float h_expected_score[2] = {0.30614017, 0.31275677}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.571255f; h_expected_score[1] = 0.365957f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights -big h_expected_score[0] = 0.29643119f; h_expected_score[1] = 0.31756123f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, RMSLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); 
hyper_params.metric = metric_t::rmse; // Unitary weights float h_expected_score[2] = {0.37811404, 0.37738713}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Unitary weights - small h_expected_score[0] = 0.6238720f; h_expected_score[1] = 0.4122899f; compute_metric( handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.37205482f; h_expected_score[1] = 0.38129811f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, LogLoss) { raft::CompareApprox<float> compApprox(tolerance); float h_score[2] = {0.0f, 0.0f}; rmm::device_uvector<float> d_score(2, stream); hyper_params.metric = metric_t::logloss; // Unitary weights float h_expected_score[2] = {0.72276, 0.724011}; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } // Non-unitary weights h_expected_score[0] = 0.715887f; h_expected_score[1] = 0.721293f; compute_metric( handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params); RAFT_CUDA_TRY( cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 2; ++i) { ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i])); } } TEST_F(GeneticProgramTest, ProgramExecution) { raft::CompareApprox<float> compApprox(tolerance); // Enable debug logging ML::Logger::get().setLevel(CUML_LEVEL_INFO); // Allocate memory std::vector<float> h_ypred(n_progs * n_samples, 0.0f); rmm::device_uvector<float> d_ypred(n_progs * n_samples, stream); // Execute programs execute(handle, d_progs, n_samples, n_progs, d_data.data(), d_ypred.data()); RAFT_CUDA_TRY(cudaMemcpyAsync(h_ypred.data(), d_ypred.data(), n_progs * n_samples * sizeof(float), cudaMemcpyDeviceToHost, stream)); handle.sync_stream(stream); // Check results for (int i = 0; i < n_samples; ++i) { ASSERT_TRUE(compApprox(h_ypred[i], h_y[i])); } for (int i = 0; i < n_samples; ++i) { ASSERT_TRUE(compApprox(h_ypred[n_samples + i], 0.5 * h_data[n_samples + i] - 0.4 * h_data[2 * n_samples + i])); } } 
TEST_F(GeneticProgramTest, ProgramFitnessScore) { raft::CompareApprox<float> compApprox(tolerance); std::vector<metric_t> all_metrics = { metric_t::mae, metric_t::mse, metric_t::rmse, metric_t::pearson, metric_t::spearman}; std::vector<float> hexpscores = { 0.57126, 0.36596, 0.38922, 0.16998, 0.62387, 0.41229, 0.32476, 0.07963, 0.10000, 0.10000}; std::vector<float> hactualscores(10); rmm::device_uvector<float> dactualscores(10, stream); // Start execution for all metrics for (int i = 0; i < 5; ++i) { hyper_params.metric = all_metrics[i]; find_batched_fitness(handle, n_progs, d_progs, dactualscores.data() + 2 * i, hyper_params, n_samples2, dx2.data(), dy2.data(), dw2.data()); handle.sync_stream(stream); } RAFT_CUDA_TRY(cudaMemcpyAsync(hactualscores.data(), dactualscores.data(), 10 * sizeof(float), cudaMemcpyDeviceToHost, stream)); std::copy( hactualscores.begin(), hactualscores.end(), std::ostream_iterator<float>(std::cerr, ";")); std::cerr << std::endl; for (int i = 0; i < 10; ++i) { ASSERT_TRUE(compApprox(std::abs(hactualscores[i]), hexpscores[i])); } } } // namespace genetic } // namespace cuml
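// --------------------------------------------------------------------------
// Illustration (not part of the original files): the "small input" expected
// scores in the MSE/MAE/RMSE tests above are consistent with plain
// weight-normalised error averages over hy2/hyp2. The host-side reference
// below is my own sketch of that formula, not the cuML device implementation;
// with unit weights it reproduces ~0.38922 (MSE) and ~0.62387 (RMSE) for
// program 0 of the 5-sample case.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

static float weighted_mse(const std::vector<float>& y,
                          const std::vector<float>& yp,
                          const std::vector<float>& w)
{
  float num = 0.0f, den = 0.0f;
  for (std::size_t i = 0; i < y.size(); ++i) {
    const float d = y[i] - yp[i];
    num += w[i] * d * d;  // MAE would accumulate w[i] * std::fabs(d) instead
    den += w[i];
  }
  return num / den;  // RMSE is the square root of this value
}

int main()
{
  // 5-sample fixture values (hy2 and program 0's slice of hyp2).
  std::vector<float> y  = {0.11103f, 0.69397f, 0.21749f, 0.71930f, 0.28976f};
  std::vector<float> yp = {0.67334f, 1.03133f, 1.09484f, 0.97263f, 1.11570f};
  std::vector<float> w(5, 1.0f);
  const float mse = weighted_mse(y, yp, w);
  std::printf("mse=%f rmse=%f\n", mse, std::sqrt(mse));
  return 0;
}
// --------------------------------------------------------------------------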
2a435a576d4bc0735156bb4e69f0fdd05c066f2f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <gmp.h> #include <cassert> #include "cgbn/cgbn.h" #include "utility/support.h" // #define TPI 32 #define TPI 32 #define BITS 1024 #define TPB 128 // the number of threads per block to launch (must be divisible by 32 typedef struct { cgbn_mem_t<BITS> x; cgbn_mem_t<BITS> y; cgbn_mem_t<BITS> m; cgbn_mem_t<BITS> mul_lo; cgbn_mem_t<BITS> mul_hi; cgbn_mem_t<BITS> r; } my_instance_t; typedef cgbn_context_t<TPI> context_t; typedef cgbn_env_t<context_t, BITS> env1024_t; //typedef cgbn_env_t<context_t, 753> env753_t; const uint64_t MNT4_INV = 0xf2044cfbe45e7fff; const uint64_t MNT6_INV = 0xc90776e23fffffff; __device__ void my_redc(uint32_t inv, uint32_t* lo, uint32_t* high, int num_limbs) { __shared__ uint32_t result[24]; if (threadIdx.x > num_limbs) return; // no use for those threads. int threadId = threadIdx.x % num_limbs; } __global__ void my_mont_mul_kernel(my_instance_t *problem_instances, uint32_t instance_count) { context_t bn_context; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp) env1024_t::cgbn_wide_t mul_wide; // uint32_t np0; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); // np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); cgbn_mul_wide(bn1024_env, mul_wide, a, b); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } __global__ void mont_mul_kernel(my_instance_t *problem_instances, uint32_t instance_count) { context_t bn_context, bn_context1; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, m, r; // three 1024-bit values (spread across a warp) uint32_t np0, np1; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); np1 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); np0 = 0xe45e7fff; printf("\n %08X, computed: %08X\n", np0, np1); cgbn_mont_mul(bn1024_env, r, a, b, m, np0); cgbn_mont2bn(bn1024_env, r, r, m, np0); cgbn_store(bn1024_env, &(problem_instances[my_instance].r), r); //cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); //cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } std::vector<uint8_t*>* compute_mont_mulcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes) { int num_elements = a.size(); my_instance_t *gpuInstances; my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements); cgbn_error_report_t *report; // create a cgbn_error_report for CGBN to report back errors 
  NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));

  for (int i = 0; i < num_elements; i ++) {
     std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
     std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
     std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
  }

  NEW_CUDA_CHECK(hipSetDevice(0));
  NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
  NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, hipMemcpyHostToDevice));

  int tpb = TPB;
  // printf("\n Threads per block =%d", tpb);
  int IPB = TPB/TPI;
  int tpi = TPI;
  // printf("\n Threads per instance = %d", tpi);
  // printf("\n Instances per block = %d", IPB);
  uint32_t num_blocks = (num_elements+IPB-1)/IPB;
  // printf("\n Number of blocks = %d", num_blocks);

  // launch enough blocks to cover every instance (IPB instances per block)
  hipLaunchKernelGGL(( mont_mul_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);

  NEW_CUDA_CHECK(hipDeviceSynchronize());
  CGBN_CHECK(report);

  // copy the instances back from gpuMemory
  NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, hipMemcpyDeviceToHost));

  std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
  for (int i = 0; i < num_elements; i ++) {
    uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
    std::memcpy((void*)result, (const void*)instance_array[i].r._limbs, num_bytes);
    res_vector->emplace_back(result);
  }

  free(instance_array);
  hipFree(gpuInstances);
  return res_vector;
}
2a435a576d4bc0735156bb4e69f0fdd05c066f2f.cu
#include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <cuda.h> #include <gmp.h> #include <cassert> #include "cgbn/cgbn.h" #include "utility/support.h" // #define TPI 32 #define TPI 32 #define BITS 1024 #define TPB 128 // the number of threads per block to launch (must be divisible by 32 typedef struct { cgbn_mem_t<BITS> x; cgbn_mem_t<BITS> y; cgbn_mem_t<BITS> m; cgbn_mem_t<BITS> mul_lo; cgbn_mem_t<BITS> mul_hi; cgbn_mem_t<BITS> r; } my_instance_t; typedef cgbn_context_t<TPI> context_t; typedef cgbn_env_t<context_t, BITS> env1024_t; //typedef cgbn_env_t<context_t, 753> env753_t; const uint64_t MNT4_INV = 0xf2044cfbe45e7fff; const uint64_t MNT6_INV = 0xc90776e23fffffff; __device__ void my_redc(uint32_t inv, uint32_t* lo, uint32_t* high, int num_limbs) { __shared__ uint32_t result[24]; if (threadIdx.x > num_limbs) return; // no use for those threads. int threadId = threadIdx.x % num_limbs; } __global__ void my_mont_mul_kernel(my_instance_t *problem_instances, uint32_t instance_count) { context_t bn_context; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp) env1024_t::cgbn_wide_t mul_wide; // uint32_t np0; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); // np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); cgbn_mul_wide(bn1024_env, mul_wide, a, b); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } __global__ void mont_mul_kernel(my_instance_t *problem_instances, uint32_t instance_count) { context_t bn_context, bn_context1; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, m, r; // three 1024-bit values (spread across a warp) uint32_t np0, np1; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); np1 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); np0 = 0xe45e7fff; printf("\n %08X, computed: %08X\n", np0, np1); cgbn_mont_mul(bn1024_env, r, a, b, m, np0); cgbn_mont2bn(bn1024_env, r, r, m, np0); cgbn_store(bn1024_env, &(problem_instances[my_instance].r), r); //cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); //cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } std::vector<uint8_t*>* compute_mont_mulcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes) { int num_elements = a.size(); my_instance_t *gpuInstances; my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements); cgbn_error_report_t *report; // create a cgbn_error_report for CGBN to report back errors NEW_CUDA_CHECK(cgbn_error_report_alloc(&report)); for (int i = 0; i < num_elements; i ++) { 
     std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
     std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
     std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
  }

  NEW_CUDA_CHECK(cudaSetDevice(0));
  NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
  NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, cudaMemcpyHostToDevice));

  int tpb = TPB;
  // printf("\n Threads per block =%d", tpb);
  int IPB = TPB/TPI;
  int tpi = TPI;
  // printf("\n Threads per instance = %d", tpi);
  // printf("\n Instances per block = %d", IPB);
  uint32_t num_blocks = (num_elements+IPB-1)/IPB;
  // printf("\n Number of blocks = %d", num_blocks);

  // launch enough blocks to cover every instance (IPB instances per block)
  mont_mul_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);

  NEW_CUDA_CHECK(cudaDeviceSynchronize());
  CGBN_CHECK(report);

  // copy the instances back from gpuMemory
  NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, cudaMemcpyDeviceToHost));

  std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
  for (int i = 0; i < num_elements; i ++) {
    uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
    std::memcpy((void*)result, (const void*)instance_array[i].r._limbs, num_bytes);
    res_vector->emplace_back(result);
  }

  free(instance_array);
  cudaFree(gpuInstances);
  return res_vector;
}
f6762358c9c378b8c7b0dbab53ee1509e782d182.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::add(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::add() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD); layers.push_back(ele); return ele; } Tensor FFModel::subtract(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::subtract() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB); layers.push_back(ele); return ele; } Tensor FFModel::multiply(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::multiply() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL); layers.push_back(ele); return ele; } Tensor FFModel::divide(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::divide() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV); layers.push_back(ele); return ele; } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type, const Tensor& in1, const Tensor& in2) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), in1, in2), op_type(_op_type) { //TODO: implement broadcast op numOutputs = 1; numWeights = 0; assert(in1.numDim == in2.numDim); int dim = in1.numDim; outputs[0].numDim = in1.numDim; for (int i = 0; i < dim; i++) { assert(in1.adim[i] == in2.adim[i]); outputs[0].adim[i] = in1.adim[i]; } } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), 2), op_type(_op_type) { } Tensor ElementBinary::init_inout(FFModel& model, const Tensor& input) { // TODO: currently disable this functional API since // FlexFlow assumes a single tensor as input assert(false); Tensor in1 = input, in2 = input; inputs[0] = in1; inputs[1] = in2; create_output_and_partition(model); return outputs[0]; } /* void ElementBinary::add_to_model(FFModel& model) { model.layers.push_back(this); } */ void ElementBinary::create_weights(FFModel& model) { // Do nothing } void ElementBinary::create_output_and_partition(FFModel& model) { //TODO: implement broadcast op assert(inputs[0].numDim == inputs[1].numDim); int dim = inputs[0].numDim; for (int i = 0; i < dim; i++) assert(inputs[0].adim[i] == inputs[1].adim[i]); switch (dim) { case 1: { create_output_and_partition_with_dim<1>(model); break; } case 2: { create_output_and_partition_with_dim<2>(model); break; } 
case 3: { create_output_and_partition_with_dim<3>(model); break; } case 4: { create_output_and_partition_with_dim<4>(model); break; } default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void ElementBinary::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; for (int i = 0; i < 2; i++) { input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]); } } } __host__ void ElementBinary::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) {} void ElementBinary::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_forward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* in1, const float* in2, float* out) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { out[i] = alpha * (in1[i] + in2[i]) + beta * out[i]; break; } case ElementBinary::OP_SUB: { out[i] = alpha * (in1[i] - in2[i]) + beta * out[i]; break; } case ElementBinary::OP_MUL: { out[i] = alpha * in1[i] * in2[i] + beta * out[i]; break; } case ElementBinary::OP_DIV: { out[i] = alpha * (in1[i] / in2[i]) + beta * out[i]; break; } default: assert(false); } } } /* regions[0](I): in1 regions[1](I): in2 regions[2](O): output */ __host__ void ElementBinary::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; float beta = 0.0f; assert(regions.size() == 3); assert(task->regions.size() == 3); const ElementBinary* ele = (const ElementBinary*) task->args; Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(in1_domain == in2_domain); assert(out_domain == in1_domain); const float* 
in1_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); hipLaunchKernelGGL(( elewise_binary_forward_kernel), dim3(GET_BLOCKS(out_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr); } void ElementBinary::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_backward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* out_grad, const float* in1, const float* in2, float* in1_grad, float* in2_grad) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_SUB: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_MUL: { in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_DIV: { in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i]; break; } default: assert(false); } } } /* regions[0](I): out_grad regions[1](I): in0 regions[2](I): in1 regions[3](I/O): in0_grad regions[4](I/O): in1_grad */ void ElementBinary::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; const ElementBinary* ele = (const ElementBinary*) task->args; assert(regions.size() == 5); assert(task->regions.size() == 5); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in0_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Domain in0_grad_domain = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Domain in1_grad_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); assert(out_grad_domain == in0_domain); assert(out_grad_domain == in1_domain); assert(out_grad_domain == in0_grad_domain); assert(out_grad_domain == in1_grad_domain); const float* out_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, 
runtime); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); float* in1_grad_ptr = helperGetTensorPointerRW<float>( regions[3], task->regions[3], FID_DATA, ctx, runtime); float* in2_grad_ptr = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); hipLaunchKernelGGL(( elewise_binary_backward_kernel), dim3(GET_BLOCKS(out_grad_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr, in1_grad_ptr, in2_grad_ptr); } void ElementBinary::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): input0 launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2](I): input1 launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): input0_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I/O): input1_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region_grad)); launcher.add_field(4, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool ElementBinary::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
f6762358c9c378b8c7b0dbab53ee1509e782d182.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::add(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::add() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD); layers.push_back(ele); return ele; } Tensor FFModel::subtract(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::subtract() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB); layers.push_back(ele); return ele; } Tensor FFModel::multiply(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::multiply() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL); layers.push_back(ele); return ele; } Tensor FFModel::divide(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::divide() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV); layers.push_back(ele); return ele; } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type, const Tensor& in1, const Tensor& in2) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), in1, in2), op_type(_op_type) { //TODO: implement broadcast op numOutputs = 1; numWeights = 0; assert(in1.numDim == in2.numDim); int dim = in1.numDim; outputs[0].numDim = in1.numDim; for (int i = 0; i < dim; i++) { assert(in1.adim[i] == in2.adim[i]); outputs[0].adim[i] = in1.adim[i]; } } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), 2), op_type(_op_type) { } Tensor ElementBinary::init_inout(FFModel& model, const Tensor& input) { // TODO: currently disable this functional API since // FlexFlow assumes a single tensor as input assert(false); Tensor in1 = input, in2 = input; inputs[0] = in1; inputs[1] = in2; create_output_and_partition(model); return outputs[0]; } /* void ElementBinary::add_to_model(FFModel& model) { model.layers.push_back(this); } */ void ElementBinary::create_weights(FFModel& model) { // Do nothing } void ElementBinary::create_output_and_partition(FFModel& model) { //TODO: implement broadcast op assert(inputs[0].numDim == inputs[1].numDim); int dim = inputs[0].numDim; for (int i = 0; i < dim; i++) assert(inputs[0].adim[i] == inputs[1].adim[i]); switch (dim) { case 1: { create_output_and_partition_with_dim<1>(model); break; } case 2: { create_output_and_partition_with_dim<2>(model); break; } case 3: { create_output_and_partition_with_dim<3>(model); break; } case 4: { 
create_output_and_partition_with_dim<4>(model); break; } default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void ElementBinary::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; for (int i = 0; i < 2; i++) { input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]); } } } __host__ void ElementBinary::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) {} void ElementBinary::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_forward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* in1, const float* in2, float* out) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { out[i] = alpha * (in1[i] + in2[i]) + beta * out[i]; break; } case ElementBinary::OP_SUB: { out[i] = alpha * (in1[i] - in2[i]) + beta * out[i]; break; } case ElementBinary::OP_MUL: { out[i] = alpha * in1[i] * in2[i] + beta * out[i]; break; } case ElementBinary::OP_DIV: { out[i] = alpha * (in1[i] / in2[i]) + beta * out[i]; break; } default: assert(false); } } } /* regions[0](I): in1 regions[1](I): in2 regions[2](O): output */ __host__ void ElementBinary::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; float beta = 0.0f; assert(regions.size() == 3); assert(task->regions.size() == 3); const ElementBinary* ele = (const ElementBinary*) task->args; Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(in1_domain == in2_domain); assert(out_domain == in1_domain); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, 
ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); elewise_binary_forward_kernel<<<GET_BLOCKS(out_domain.get_volume()), CUDA_NUM_THREADS>>>( out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr); } void ElementBinary::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_backward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* out_grad, const float* in1, const float* in2, float* in1_grad, float* in2_grad) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_SUB: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_MUL: { in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_DIV: { in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i]; break; } default: assert(false); } } } /* regions[0](I): out_grad regions[1](I): in0 regions[2](I): in1 regions[3](I/O): in0_grad regions[4](I/O): in1_grad */ void ElementBinary::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; const ElementBinary* ele = (const ElementBinary*) task->args; assert(regions.size() == 5); assert(task->regions.size() == 5); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in0_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Domain in0_grad_domain = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Domain in1_grad_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); assert(out_grad_domain == in0_domain); assert(out_grad_domain == in1_domain); assert(out_grad_domain == in0_grad_domain); assert(out_grad_domain == in1_grad_domain); const float* out_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); 
const float* in2_ptr = helperGetTensorPointerRO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); float* in1_grad_ptr = helperGetTensorPointerRW<float>( regions[3], task->regions[3], FID_DATA, ctx, runtime); float* in2_grad_ptr = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); elewise_binary_backward_kernel<<<GET_BLOCKS(out_grad_domain.get_volume()), CUDA_NUM_THREADS>>>( out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr, in1_grad_ptr, in2_grad_ptr); } void ElementBinary::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): input0 launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2](I): input1 launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): input0_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I/O): input1_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region_grad)); launcher.add_field(4, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool ElementBinary::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
06444f98feb74fcf90c34a118ac1be658236892f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> //#include <cutil_inline.h> #include "GaussianBlurCUDA.h" #include "utility_CUDA.h" //filter kernel width range (don't change these) #define KERNEL_MAX_WIDTH 45 //do not change!!! #define KERNEL_MIN_WIDTH 5 //do not change!!! #define FILTER_WIDTH_FACTOR 5.0f #define THREADS_NUMBER_H 16 #define THREADS_NUMBER_V 12 //////////////////////////////////////////////////////////////////////////////// // Convolution kernel //////////////////////////////////////////////////////////////////////////////// __device__ __constant__ float g_Kernel[KERNEL_MAX_WIDTH]; // declare texture reference for 2D float texture texture<float, 2, hipReadModeElementType> tex32F0; //////////////////////////////////////////////////////////////////////////////// // GPU-specific defines //////////////////////////////////////////////////////////////////////////////// //Maps to a single instruction on G8x / G9x / G10x #define IMAD(a, b, c) ( __mul24((a), (b)) + (c) ) //Use unrolled innermost convolution loop //Round a / b to nearest higher integer value inline int iDivUp(int a, int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b){ return (a % b != 0) ? (a - a % b + b) : a; } //////////////////////////////////////////////////////////////////////////////// // Kernel Row convolution filter //////////////////////////////////////////////////////////////////////////////// template<int FR> __global__ void convolutionRowsKernel( float *d_Dst, int imageW, int imageH ) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if(ix >= imageW || iy >= imageH) return; float sum = 0.f; for(int k = -FR; k <= FR; k++){ sum += tex2D(tex32F0, x + (float)k, y) * g_Kernel[FR - k]; } d_Dst[ IMAD(iy, imageW, ix) ] = sum; } //////////////////////////////////////////////////////////////////////////////// // Kernel Column convolution filter //////////////////////////////////////////////////////////////////////////////// template<int FR> __global__ void convolutionColsKernel( float *d_Dst, int imageW, int imageH ) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if(ix >= imageW || iy >= imageH) return; float sum = 0.f; for(int k = -FR; k <= FR; k++){ sum += tex2D(tex32F0, x, y + (float)k) * g_Kernel[FR - k]; } d_Dst[IMAD(iy, imageW, ix)] = sum; } GaussianBlurCUDA::GaussianBlurCUDA(int width, int height, float sigma): m_nWidth(width), m_nHeight(height), m_paraSigma(sigma) { hipChannelFormatDesc floatTex = hipCreateChannelDesc<float>(); //alloc cuda array: hipMallocArray(&m_cuaSrc, &floatTex, m_nWidth, m_nHeight); hipMallocArray(&m_cuaTmp, &floatTex, m_nWidth, m_nHeight); hipMallocArray(&m_cuaBlur, &floatTex, m_nWidth, m_nHeight); //alloc system memory: hipMalloc((void **)&m_buf32FA, m_nWidth*m_nHeight*sizeof(float)); //construct kernel for smoothing gradients float filter_kernel[KERNEL_MAX_WIDTH]; CreateFilterKernel(m_paraSigma, filter_kernel, m_nKernelWidth); hipMemcpyToSymbol(g_Kernel, filter_kernel, m_nKernelWidth*sizeof(float), 0, hipMemcpyHostToDevice); //copy kernel to device memory. 
} GaussianBlurCUDA::~GaussianBlurCUDA() { hipFreeArray(m_cuaSrc); hipFreeArray(m_cuaTmp); hipFreeArray(m_cuaBlur); hipFree(m_buf32FA); } void GaussianBlurCUDA::CreateFilterKernel(float sigma, float* kernel, int& width) { int i, sz; width = (int)(FILTER_WIDTH_FACTOR * sigma); if( width%2 == 0 ){ width+=1; } sz = (width-1)>>1; if(width > KERNEL_MAX_WIDTH) { //filter size truncation sz = KERNEL_MAX_WIDTH >> 1; width = KERNEL_MAX_WIDTH; }else if(width < KERNEL_MIN_WIDTH) { sz = KERNEL_MIN_WIDTH >> 1; width = KERNEL_MIN_WIDTH; } float rv = -0.5f/(sigma*sigma), v, ksum = 0.f; // pre-compute filter for( i = -sz ; i <= sz ; ++i) { kernel[i+sz] = v = exp( i * i * rv ) ; ksum += v; } //normalize the kernel //rv = 1.0f/ksum; for(i=0; i<width; i++) kernel[i]*=rv; for(i = 0; i < width; i++) kernel[i] = 1.0f/(float)width; } template<int FR> void GaussianBlurCUDA::FilterImage(hipArray *dst, hipArray *src) { dim3 threads(THREADS_NUMBER_H, THREADS_NUMBER_V); dim3 blocks( iDivUp(m_nWidth, threads.x), iDivUp(m_nHeight, threads.y) ); //number of blocks required //horizontal pass: hipBindTextureToArray(tex32F0, src); hipLaunchKernelGGL(( convolutionRowsKernel<FR>), dim3(blocks), dim3(threads), 0, 0, m_buf32FA, m_nWidth, m_nHeight ); hipUnbindTexture(tex32F0); hipMemcpyToArray(m_cuaTmp, 0, 0, m_buf32FA, m_nWidth*m_nHeight*sizeof(float), hipMemcpyDeviceToDevice); //vertical pass: hipBindTextureToArray(tex32F0, m_cuaTmp); hipLaunchKernelGGL(( convolutionColsKernel<FR>), dim3(blocks), dim3(threads), 0, 0, m_buf32FA, m_nWidth, m_nHeight ); hipUnbindTexture(tex32F0); hipMemcpyToArray( dst, 0, 0, m_buf32FA, m_nWidth*m_nHeight*sizeof(float), hipMemcpyDeviceToDevice); } void GaussianBlurCUDA::Filter(hipArray *dst, hipArray *src) { switch( m_nKernelWidth>>1 /*kernel radius*/ ) { case 2: FilterImage< 2>(dst, src); break; case 3: FilterImage< 3>(dst, src); break; case 4: FilterImage< 4>(dst, src); break; case 5: FilterImage< 5>(dst, src); break; case 6: FilterImage< 6>(dst, src); break; case 7: FilterImage< 7>(dst, src); break; case 8: FilterImage< 8>(dst, src); break; case 9: FilterImage< 9>(dst, src); break; case 10: FilterImage<10>(dst, src); break; case 11: FilterImage<11>(dst, src); break; case 12: FilterImage<12>(dst, src); break; case 13: FilterImage<13>(dst, src); break; case 14: FilterImage<14>(dst, src); break; case 15: FilterImage<15>(dst, src); break; case 16: FilterImage<16>(dst, src); break; case 17: FilterImage<17>(dst, src); break; case 18: FilterImage<18>(dst, src); break; case 19: FilterImage<19>(dst, src); break; case 20: FilterImage<20>(dst, src); break; case 21: FilterImage<21>(dst, src); break; case 22: FilterImage<22>(dst, src); break; default: break; } } int GaussianBlurCUDA::Filter( float* dst, const float* src ) { hipMemcpyToArray(m_cuaSrc, 0, 0, src, m_nWidth * m_nHeight * sizeof(float), hipMemcpyHostToDevice); Filter(m_cuaBlur, m_cuaSrc); hipMemcpy(dst, m_buf32FA, m_nWidth * m_nHeight * sizeof(float), hipMemcpyDeviceToHost); //GPU memory to CPU memory copy - slow!!! 
return 0; } int GaussianBlurCUDA::FilterMultipleImages(float *data, int pitch, int depth) { for(int i = 0; i < depth; i++) { CUDA_SAFE_CALL(hipMemcpy2DToArray(m_cuaSrc, 0, 0, data + i * pitch/sizeof(float) * m_nHeight, pitch, m_nWidth * sizeof(float), m_nHeight, hipMemcpyDeviceToDevice)); Filter(m_cuaBlur, m_cuaSrc); CudaCheckError(); CUDA_SAFE_CALL(hipMemcpy2DFromArray(data + i * pitch/sizeof(float) * m_nHeight, pitch, m_cuaBlur, 0, 0, m_nWidth * sizeof(float), m_nHeight, hipMemcpyDeviceToDevice)); //CUDA_SAFE_CALL(hipMemcpy2DFromArray(data + i * pitch/sizeof(float) * m_nHeight, pitch, m_cuaSrc, 0, 0, m_nWidth * sizeof(float), m_nHeight, hipMemcpyDeviceToDevice)); } return 0; }
06444f98feb74fcf90c34a118ac1be658236892f.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> //#include <cutil_inline.h> #include "GaussianBlurCUDA.h" #include "utility_CUDA.h" //filter kernel width range (don't change these) #define KERNEL_MAX_WIDTH 45 //do not change!!! #define KERNEL_MIN_WIDTH 5 //do not change!!! #define FILTER_WIDTH_FACTOR 5.0f #define THREADS_NUMBER_H 16 #define THREADS_NUMBER_V 12 //////////////////////////////////////////////////////////////////////////////// // Convolution kernel //////////////////////////////////////////////////////////////////////////////// __device__ __constant__ float g_Kernel[KERNEL_MAX_WIDTH]; // declare texture reference for 2D float texture texture<float, 2, cudaReadModeElementType> tex32F0; //////////////////////////////////////////////////////////////////////////////// // GPU-specific defines //////////////////////////////////////////////////////////////////////////////// //Maps to a single instruction on G8x / G9x / G10x #define IMAD(a, b, c) ( __mul24((a), (b)) + (c) ) //Use unrolled innermost convolution loop //Round a / b to nearest higher integer value inline int iDivUp(int a, int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b){ return (a % b != 0) ? (a - a % b + b) : a; } //////////////////////////////////////////////////////////////////////////////// // Kernel Row convolution filter //////////////////////////////////////////////////////////////////////////////// template<int FR> __global__ void convolutionRowsKernel( float *d_Dst, int imageW, int imageH ) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if(ix >= imageW || iy >= imageH) return; float sum = 0.f; for(int k = -FR; k <= FR; k++){ sum += tex2D(tex32F0, x + (float)k, y) * g_Kernel[FR - k]; } d_Dst[ IMAD(iy, imageW, ix) ] = sum; } //////////////////////////////////////////////////////////////////////////////// // Kernel Column convolution filter //////////////////////////////////////////////////////////////////////////////// template<int FR> __global__ void convolutionColsKernel( float *d_Dst, int imageW, int imageH ) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if(ix >= imageW || iy >= imageH) return; float sum = 0.f; for(int k = -FR; k <= FR; k++){ sum += tex2D(tex32F0, x, y + (float)k) * g_Kernel[FR - k]; } d_Dst[IMAD(iy, imageW, ix)] = sum; } GaussianBlurCUDA::GaussianBlurCUDA(int width, int height, float sigma): m_nWidth(width), m_nHeight(height), m_paraSigma(sigma) { cudaChannelFormatDesc floatTex = cudaCreateChannelDesc<float>(); //alloc cuda array: cudaMallocArray(&m_cuaSrc, &floatTex, m_nWidth, m_nHeight); cudaMallocArray(&m_cuaTmp, &floatTex, m_nWidth, m_nHeight); cudaMallocArray(&m_cuaBlur, &floatTex, m_nWidth, m_nHeight); //alloc system memory: cudaMalloc((void **)&m_buf32FA, m_nWidth*m_nHeight*sizeof(float)); //construct kernel for smoothing gradients float filter_kernel[KERNEL_MAX_WIDTH]; CreateFilterKernel(m_paraSigma, filter_kernel, m_nKernelWidth); cudaMemcpyToSymbol(g_Kernel, filter_kernel, m_nKernelWidth*sizeof(float), 0, cudaMemcpyHostToDevice); //copy kernel to device memory. 
} GaussianBlurCUDA::~GaussianBlurCUDA() { cudaFreeArray(m_cuaSrc); cudaFreeArray(m_cuaTmp); cudaFreeArray(m_cuaBlur); cudaFree(m_buf32FA); } void GaussianBlurCUDA::CreateFilterKernel(float sigma, float* kernel, int& width) { int i, sz; width = (int)(FILTER_WIDTH_FACTOR * sigma); if( width%2 == 0 ){ width+=1; } sz = (width-1)>>1; if(width > KERNEL_MAX_WIDTH) { //filter size truncation sz = KERNEL_MAX_WIDTH >> 1; width = KERNEL_MAX_WIDTH; }else if(width < KERNEL_MIN_WIDTH) { sz = KERNEL_MIN_WIDTH >> 1; width = KERNEL_MIN_WIDTH; } float rv = -0.5f/(sigma*sigma), v, ksum = 0.f; // pre-compute filter for( i = -sz ; i <= sz ; ++i) { kernel[i+sz] = v = exp( i * i * rv ) ; ksum += v; } //normalize the kernel //rv = 1.0f/ksum; for(i=0; i<width; i++) kernel[i]*=rv; for(i = 0; i < width; i++) kernel[i] = 1.0f/(float)width; } template<int FR> void GaussianBlurCUDA::FilterImage(cudaArray *dst, cudaArray *src) { dim3 threads(THREADS_NUMBER_H, THREADS_NUMBER_V); dim3 blocks( iDivUp(m_nWidth, threads.x), iDivUp(m_nHeight, threads.y) ); //number of blocks required //horizontal pass: cudaBindTextureToArray(tex32F0, src); convolutionRowsKernel<FR><<<blocks, threads>>>( m_buf32FA, m_nWidth, m_nHeight ); cudaUnbindTexture(tex32F0); cudaMemcpyToArray(m_cuaTmp, 0, 0, m_buf32FA, m_nWidth*m_nHeight*sizeof(float), cudaMemcpyDeviceToDevice); //vertical pass: cudaBindTextureToArray(tex32F0, m_cuaTmp); convolutionColsKernel<FR><<<blocks, threads>>>( m_buf32FA, m_nWidth, m_nHeight ); cudaUnbindTexture(tex32F0); cudaMemcpyToArray( dst, 0, 0, m_buf32FA, m_nWidth*m_nHeight*sizeof(float), cudaMemcpyDeviceToDevice); } void GaussianBlurCUDA::Filter(cudaArray *dst, cudaArray *src) { switch( m_nKernelWidth>>1 /*kernel radius*/ ) { case 2: FilterImage< 2>(dst, src); break; case 3: FilterImage< 3>(dst, src); break; case 4: FilterImage< 4>(dst, src); break; case 5: FilterImage< 5>(dst, src); break; case 6: FilterImage< 6>(dst, src); break; case 7: FilterImage< 7>(dst, src); break; case 8: FilterImage< 8>(dst, src); break; case 9: FilterImage< 9>(dst, src); break; case 10: FilterImage<10>(dst, src); break; case 11: FilterImage<11>(dst, src); break; case 12: FilterImage<12>(dst, src); break; case 13: FilterImage<13>(dst, src); break; case 14: FilterImage<14>(dst, src); break; case 15: FilterImage<15>(dst, src); break; case 16: FilterImage<16>(dst, src); break; case 17: FilterImage<17>(dst, src); break; case 18: FilterImage<18>(dst, src); break; case 19: FilterImage<19>(dst, src); break; case 20: FilterImage<20>(dst, src); break; case 21: FilterImage<21>(dst, src); break; case 22: FilterImage<22>(dst, src); break; default: break; } } int GaussianBlurCUDA::Filter( float* dst, const float* src ) { cudaMemcpyToArray(m_cuaSrc, 0, 0, src, m_nWidth * m_nHeight * sizeof(float), cudaMemcpyHostToDevice); Filter(m_cuaBlur, m_cuaSrc); cudaMemcpy(dst, m_buf32FA, m_nWidth * m_nHeight * sizeof(float), cudaMemcpyDeviceToHost); //GPU memory to CPU memory copy - slow!!! 
return 0; } int GaussianBlurCUDA::FilterMultipleImages(float *data, int pitch, int depth) { for(int i = 0; i < depth; i++) { CUDA_SAFE_CALL(cudaMemcpy2DToArray(m_cuaSrc, 0, 0, data + i * pitch/sizeof(float) * m_nHeight, pitch, m_nWidth * sizeof(float), m_nHeight, cudaMemcpyDeviceToDevice)); Filter(m_cuaBlur, m_cuaSrc); CudaCheckError(); CUDA_SAFE_CALL(cudaMemcpy2DFromArray(data + i * pitch/sizeof(float) * m_nHeight, pitch, m_cuaBlur, 0, 0, m_nWidth * sizeof(float), m_nHeight, cudaMemcpyDeviceToDevice)); //CUDA_SAFE_CALL(cudaMemcpy2DFromArray(data + i * pitch/sizeof(float) * m_nHeight, pitch, m_cuaSrc, 0, 0, m_nWidth * sizeof(float), m_nHeight, cudaMemcpyDeviceToDevice)); } return 0; }
f72e1c24929083fdb2ffaad19a82c4b01c35f2a7.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaNoConversion_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // allocate XSIZE*YSIZE floats per buffer (size given in bytes)
      float *data = NULL;
      hipMalloc(&data, XSIZE*YSIZE*sizeof(float));
      float *tickOutputsTraces = NULL;
      hipMalloc(&tickOutputsTraces, XSIZE*YSIZE*sizeof(float));
      float *tickOutputsTracesLearning = NULL;
      hipMalloc(&tickOutputsTracesLearning, XSIZE*YSIZE*sizeof(float));
      float scaling = 1;
      unsigned int inputDimX = 1;
      unsigned int inputDimY = 1;
      unsigned int inputDimZ = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(( cudaNoConversion_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      hipDeviceSynchronize();
      // warm-up runs
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( cudaNoConversion_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      }
      // timed runs
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( cudaNoConversion_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
f72e1c24929083fdb2ffaad19a82c4b01c35f2a7.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaNoConversion_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // allocate XSIZE*YSIZE floats per buffer (size given in bytes)
      float *data = NULL;
      cudaMalloc(&data, XSIZE*YSIZE*sizeof(float));
      float *tickOutputsTraces = NULL;
      cudaMalloc(&tickOutputsTraces, XSIZE*YSIZE*sizeof(float));
      float *tickOutputsTracesLearning = NULL;
      cudaMalloc(&tickOutputsTracesLearning, XSIZE*YSIZE*sizeof(float));
      float scaling = 1;
      unsigned int inputDimX = 1;
      unsigned int inputDimY = 1;
      unsigned int inputDimZ = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      cudaNoConversion_kernel<<<gridBlock,threadBlock>>>(data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      cudaDeviceSynchronize();
      // warm-up runs
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        cudaNoConversion_kernel<<<gridBlock,threadBlock>>>(data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      }
      // timed runs
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        cudaNoConversion_kernel<<<gridBlock,threadBlock>>>(data,tickOutputsTraces,tickOutputsTracesLearning,scaling,inputDimX,inputDimY,inputDimZ);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
6d2355985b342a27968c2dd865dd8dbfd344d5ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "util.h" #include "ref_2dhisto.h" #define BIN_COUNT 1024 void* AllocateDevice(size_t size) { void *addr; hipMalloc(&addr, size); return addr; } void MemCpyToDevice(void* dest, void* src, size_t size) { hipMemcpy(dest, src, size, hipMemcpyHostToDevice); } void CopyFromDevice(void* dest, void* src, size_t size) { hipMemcpy(dest, src, size, hipMemcpyDeviceToHost); } void FreeDevice(void* addr) { hipFree(addr); } __global__ void GenerateHist(uint32_t* input, size_t height, size_t width, uint32_t* global_bins) { int globalTid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; __shared__ int s_Hist[BIN_COUNT]; //clear partial histogram buffer #pragma unroll for (int pos = threadIdx.x; pos < BIN_COUNT; pos += numThreads) { s_Hist[pos] = 0; } __syncthreads (); //generate partial histogram #pragma unroll for (int pos = globalTid; pos < height * width; pos += numThreads) { if (s_Hist[input[pos]] < 255) { atomicAdd (s_Hist + input[pos], 1); } } __syncthreads(); //update global histogram #pragma unroll for(int pos = threadIdx.x; pos < BIN_COUNT; pos += numThreads) { if(global_bins[threadIdx.x] < 255) { atomicAdd(global_bins + pos, s_Hist[pos]); } } } __global__ void Trans32to8(uint32_t* global_bins, uint8_t* device_bins) { int globalTid = blockIdx.x * blockDim.x + threadIdx.x; if(global_bins[globalTid] < 255) { device_bins[globalTid] = (uint8_t)global_bins[globalTid]; } else { device_bins[globalTid] = (uint8_t)255; } } void opt_2dhisto(uint32_t* device_input, int height, int width, uint32_t* global_bins, uint8_t* device_bins) { /* This function should only contain a call to the GPU histogramming kernel. Any memory allocations and transfers must be done outside this function */ hipMemset(global_bins, 0, HISTO_HEIGHT * HISTO_WIDTH * sizeof(uint32_t)); hipLaunchKernelGGL(( GenerateHist), dim3(16), dim3(1024), 0, 0, device_input, height, width, global_bins); hipLaunchKernelGGL(( Trans32to8), dim3(1), dim3(1024), 0, 0, global_bins, device_bins); hipDeviceSynchronize(); } /* Include below the implementation of any other functions you need */
6d2355985b342a27968c2dd865dd8dbfd344d5ad.cu
#include <stdint.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "util.h" #include "ref_2dhisto.h" #define BIN_COUNT 1024 void* AllocateDevice(size_t size) { void *addr; cudaMalloc(&addr, size); return addr; } void MemCpyToDevice(void* dest, void* src, size_t size) { cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice); } void CopyFromDevice(void* dest, void* src, size_t size) { cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost); } void FreeDevice(void* addr) { cudaFree(addr); } __global__ void GenerateHist(uint32_t* input, size_t height, size_t width, uint32_t* global_bins) { int globalTid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; __shared__ int s_Hist[BIN_COUNT]; //clear partial histogram buffer #pragma unroll for (int pos = threadIdx.x; pos < BIN_COUNT; pos += numThreads) { s_Hist[pos] = 0; } __syncthreads (); //generate partial histogram #pragma unroll for (int pos = globalTid; pos < height * width; pos += numThreads) { if (s_Hist[input[pos]] < 255) { atomicAdd (s_Hist + input[pos], 1); } } __syncthreads(); //update global histogram #pragma unroll for(int pos = threadIdx.x; pos < BIN_COUNT; pos += numThreads) { if(global_bins[threadIdx.x] < 255) { atomicAdd(global_bins + pos, s_Hist[pos]); } } } __global__ void Trans32to8(uint32_t* global_bins, uint8_t* device_bins) { int globalTid = blockIdx.x * blockDim.x + threadIdx.x; if(global_bins[globalTid] < 255) { device_bins[globalTid] = (uint8_t)global_bins[globalTid]; } else { device_bins[globalTid] = (uint8_t)255; } } void opt_2dhisto(uint32_t* device_input, int height, int width, uint32_t* global_bins, uint8_t* device_bins) { /* This function should only contain a call to the GPU histogramming kernel. Any memory allocations and transfers must be done outside this function */ cudaMemset(global_bins, 0, HISTO_HEIGHT * HISTO_WIDTH * sizeof(uint32_t)); GenerateHist<<<16, 1024>>>(device_input, height, width, global_bins); Trans32to8<<<1, 1024>>>(global_bins, device_bins); cudaThreadSynchronize(); } /* Include below the implementation of any other functions you need */
aeabc0588bd71a1d7f60f46a51913481beac9f84.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ComputeDistanceKernel( int inputSize, float *distance, float *dimensionWeight, int maxCells, float *difference )
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
        + blockDim.x*blockIdx.x //blocks preceding current block
        + threadIdx.x;

    if(threadId < maxCells)
    {
        float sum = 0.00f;
        float value;
        for(int i = 0; i < inputSize; i++)
        {
            value = difference[threadId * inputSize + i];
            // accumulate weighted squared differences over the input dimensions
            sum += dimensionWeight[i] * value*value;
        }
        distance[threadId] = sqrtf(sum);
    }
}
aeabc0588bd71a1d7f60f46a51913481beac9f84.cu
#include "includes.h" __global__ void ComputeDistanceKernel( int inputSize, float *distance, float *dimensionWeight, int maxCells, float *difference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells) { float sum = 0.00f; float value; for(int i = 0; i < inputSize; i++) { value = difference[threadId * inputSize + i]; sum += dimensionWeight[i] * value*value; } distance[threadId] = sqrtf(sum); } }