Columns (string length ranges):
hip_filename: string, length 5 to 84
hip_content: string, length 79 to 9.69M
cuda_filename: string, length 4 to 83
cuda_content: string, length 19 to 9.69M
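Each row below pairs a hipify-generated HIP translation (hip_filename / hip_content) with its original CUDA source (cuda_filename / cuda_content). Across the pairs the differences are mechanical: the HIP runtime header replaces the CUDA one, hipcub / hiprand replace cub / curand, hipFree / hipHostFree replace cudaFree / cudaFreeHost, and triple-chevron kernel launches become hipLaunchKernelGGL calls. The sketch below is only an illustration of that launch-syntax correspondence, using a hypothetical scale kernel that is not taken from the dataset itself.

#include <cuda_runtime.h>

// Hypothetical example (not a dataset row): how the same kernel launch appears
// in the cuda_content column versus the hipified hip_content column.
__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, cudaStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);

    // CUDA form, as it appears in cuda_content:
    scale<<<grid, block, 0, stream>>>(d_x, a, n);

    // Equivalent HIP form emitted by hipify, as it appears in hip_content
    // (shown as a comment so this sketch stays a single CUDA translation unit):
    // hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, stream, d_x, a, n);
}

The HIP call takes the grid and block dimensions, shared-memory size, and stream as leading arguments, followed by the kernel arguments, which is exactly the rewrite visible in each hip_content cell relative to its cuda_content counterpart.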
73c3173a519bccad07855316ea20b7b6272b6462.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduction_ops.h" #include "caffe2/utils/conversions.h" #include <hipcub/hipcub.hpp> namespace caffe2 { REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( RowwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, true>) REGISTER_CUDA_OPERATOR( ColwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, false>) REGISTER_CUDA_OPERATOR( SumElementsGradient, SumElementsGradientOp<float, CUDAContext>); template <typename T> __global__ void SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) { const T value = average ? (*dY) / N : *dY; CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = value; } } __global__ void rowwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i / input_size / N; const int y_index = b_i * M + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } __global__ void colwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i % input_size % N; const int y_index = b_i * N + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& dY = Input(1); DCHECK_EQ(dY.size(), 1); auto* dX = Output(0); dX->ResizeLike(X); hipLaunchKernelGGL(( SumElementsGradientKernel<float>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), average_, X.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } template <typename T, class Context, bool ROWWISE> bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); auto* dX = Output(0); dX->ResizeLike(X); CAFFE_ENFORCE_EQ(X.ndim(), 3); const int batch_size = X.dim32(0); const int M = X.dim32(1); const int N = X.dim32(2); const T* Xdata = X.template data<T>(); const T* Ydata = Y.template data<T>(); const T* dYdata = dY.template data<T>(); T* dXdata = dX->template mutable_data<T>(); const int input_size = M * N; if (ROWWISE) { hipLaunchKernelGGL(( rowwise_max_gradient_kernel), dim3(CAFFE_GET_BLOCKS(batch_size * input_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } else { hipLaunchKernelGGL(( colwise_max_gradient_kernel), dim3(CAFFE_GET_BLOCKS(batch_size * input_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } return true; } } // namespace caffe2
73c3173a519bccad07855316ea20b7b6272b6462.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduction_ops.h" #include "caffe2/utils/conversions.h" #include <cub/cub.cuh> namespace caffe2 { REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( RowwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, true>) REGISTER_CUDA_OPERATOR( ColwiseMaxGradient, MaxReductionGradientOp<float, CUDAContext, false>) REGISTER_CUDA_OPERATOR( SumElementsGradient, SumElementsGradientOp<float, CUDAContext>); template <typename T> __global__ void SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) { const T value = average ? (*dY) / N : *dY; CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = value; } } __global__ void rowwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i / input_size / N; const int y_index = b_i * M + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } __global__ void colwise_max_gradient_kernel( const int batch_size, const int M, const int N, const float* X, const float* Y, const float* dY, float* dX) { const int input_size = M * N; CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) { const int b_i = i / input_size; const int b_n = i % input_size % N; const int y_index = b_i * N + b_n; if (X[i] == Y[y_index]) { dX[i] = dY[y_index]; } else { dX[i] = 0.0; } } } template <> bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& dY = Input(1); DCHECK_EQ(dY.size(), 1); auto* dX = Output(0); dX->ResizeLike(X); SumElementsGradientKernel<float><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( average_, X.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } template <typename T, class Context, bool ROWWISE> bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); auto* dX = Output(0); dX->ResizeLike(X); CAFFE_ENFORCE_EQ(X.ndim(), 3); const int batch_size = X.dim32(0); const int M = X.dim32(1); const int N = X.dim32(2); const T* Xdata = X.template data<T>(); const T* Ydata = Y.template data<T>(); const T* dYdata = dY.template data<T>(); T* dXdata = dX->template mutable_data<T>(); const int input_size = M * N; if (ROWWISE) { rowwise_max_gradient_kernel<<< CAFFE_GET_BLOCKS(batch_size * input_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } else { colwise_max_gradient_kernel<<< CAFFE_GET_BLOCKS(batch_size * input_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( batch_size, M, N, Xdata, Ydata, dYdata, dXdata); } return true; } } // namespace caffe2
8b0ae77731c10a84707d398c6e487447109edeb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void current_calculate_postsynaptic_current_injection_kernel(float* d_synaptic_efficacies_or_weights, float* d_time_of_last_spike_to_reach_synapse, int* d_postsynaptic_neuron_indices, float* d_neurons_current_injections, float current_time_in_seconds, size_t total_number_of_synapses){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < total_number_of_synapses) { if (d_time_of_last_spike_to_reach_synapse[idx] == current_time_in_seconds) { atomicAdd(&d_neurons_current_injections[d_postsynaptic_neuron_indices[idx]], d_synaptic_efficacies_or_weights[idx]); } idx += blockDim.x * gridDim.x; } __syncthreads(); }
8b0ae77731c10a84707d398c6e487447109edeb4.cu
#include "includes.h" __global__ void current_calculate_postsynaptic_current_injection_kernel(float* d_synaptic_efficacies_or_weights, float* d_time_of_last_spike_to_reach_synapse, int* d_postsynaptic_neuron_indices, float* d_neurons_current_injections, float current_time_in_seconds, size_t total_number_of_synapses){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < total_number_of_synapses) { if (d_time_of_last_spike_to_reach_synapse[idx] == current_time_in_seconds) { atomicAdd(&d_neurons_current_injections[d_postsynaptic_neuron_indices[idx]], d_synaptic_efficacies_or_weights[idx]); } idx += blockDim.x * gridDim.x; } __syncthreads(); }
0875c7588d9e627275e3e8d95cd8c4f530f3b105.hip
// !!! This is a file automatically generated by hipify!!! #include "CUAPI.h" #if ( defined GPU && defined GRAVITY ) extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ]; extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ]; extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ]; #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ]; extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ]; #endif extern real (*d_Flu_Array_G )[GRA_NIN ][ CUBE(PS1) ]; extern double (*d_Corner_Array_PGT)[3]; #ifdef DUAL_ENERGY extern char (*d_DE_Array_G )[ CUBE(PS1) ]; #endif #ifdef MHD extern real (*d_Emag_Array_G )[ CUBE(PS1) ]; #endif extern real (*d_Pot_Array_T )[ CUBE(GRA_NXT) ]; extern real *d_ExtPotTable; extern void **d_ExtPotGenePtr; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_MemFree_PoissonGravity // Description : Free the device and host memory previously allocated by CUAPI_MemAllocate_PoissonGravity() // // Parameter : None //------------------------------------------------------------------------------------------------------- void CUAPI_MemFree_PoissonGravity() { // free the device memory if ( d_Rho_Array_P != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Rho_Array_P ) ); d_Rho_Array_P = NULL; } if ( d_Pot_Array_P_In != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Pot_Array_P_In ) ); d_Pot_Array_P_In = NULL; } if ( d_Pot_Array_P_Out != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Pot_Array_P_Out ) ); d_Pot_Array_P_Out = NULL; } # ifdef UNSPLIT_GRAVITY if ( d_Pot_Array_USG_F != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Pot_Array_USG_F ) ); d_Pot_Array_USG_F = NULL; } if ( d_Pot_Array_USG_G != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Pot_Array_USG_G ) ); d_Pot_Array_USG_G = NULL; } if ( d_Flu_Array_USG_G != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flu_Array_USG_G ) ); d_Flu_Array_USG_G = NULL; } # endif if ( d_Flu_Array_G != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flu_Array_G ) ); d_Flu_Array_G = NULL; } if ( d_Corner_Array_PGT != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Corner_Array_PGT ) ); d_Corner_Array_PGT = NULL; } # ifdef DUAL_ENERGY if ( d_DE_Array_G != NULL ) { CUDA_CHECK_ERROR( hipFree( d_DE_Array_G ) ); d_DE_Array_G = NULL; } # endif # ifdef MHD if ( d_Emag_Array_G != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Emag_Array_G ) ); d_Emag_Array_G = NULL; } # endif if ( d_Pot_Array_T != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Pot_Array_T ) ); d_Pot_Array_T = NULL; } if ( d_ExtPotTable != NULL ) { CUDA_CHECK_ERROR( hipFree( d_ExtPotTable ) ); d_ExtPotTable = NULL; } if ( d_ExtPotGenePtr != NULL ) { CUDA_CHECK_ERROR( hipFree( d_ExtPotGenePtr ) ); d_ExtPotGenePtr = NULL; } // free the host memory allocated by CUDA for (int t=0; t<2; t++) { if ( h_Rho_Array_P [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Rho_Array_P [t] ) ); h_Rho_Array_P [t] = NULL; } if ( h_Pot_Array_P_In [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_P_In [t] ) ); h_Pot_Array_P_In [t] = NULL; } if ( h_Pot_Array_P_Out [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_P_Out [t] ) ); h_Pot_Array_P_Out [t] = NULL; } # ifdef UNSPLIT_GRAVITY if ( h_Pot_Array_USG_F [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_USG_F [t] ) ); h_Pot_Array_USG_F [t] = NULL; } if ( h_Pot_Array_USG_G [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_USG_G [t] ) ); h_Pot_Array_USG_G [t] = NULL; } if ( h_Flu_Array_USG_G [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_USG_G [t] ) ); h_Flu_Array_USG_G 
[t] = NULL; } # endif if ( h_Flu_Array_G [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_G [t] ) ); h_Flu_Array_G [t] = NULL; } if ( h_Corner_Array_PGT[t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Corner_Array_PGT[t] ) ); h_Corner_Array_PGT[t] = NULL; } # ifdef DUAL_ENERGY if ( h_DE_Array_G [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_DE_Array_G [t] ) ); h_DE_Array_G [t] = NULL; } # endif # ifdef MHD if ( h_Emag_Array_G [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Emag_Array_G [t] ) ); h_Emag_Array_G [t] = NULL; } # endif if ( h_Pot_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_T [t] ) ); h_Pot_Array_T [t] = NULL; } } // for (int t=0; t<2; t++) if ( h_ExtPotTable != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_ExtPotTable ) ); h_ExtPotTable = NULL; } if ( h_ExtPotGenePtr != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_ExtPotGenePtr ) ); h_ExtPotGenePtr = NULL; } } // FUNCTION : CUAPI_MemFree_PoissonGravity #endif // #if ( defined GPU && defined GRAVITY )
0875c7588d9e627275e3e8d95cd8c4f530f3b105.cu
#include "CUAPI.h" #if ( defined GPU && defined GRAVITY ) extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ]; extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ]; extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ]; #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ]; extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ]; #endif extern real (*d_Flu_Array_G )[GRA_NIN ][ CUBE(PS1) ]; extern double (*d_Corner_Array_PGT)[3]; #ifdef DUAL_ENERGY extern char (*d_DE_Array_G )[ CUBE(PS1) ]; #endif #ifdef MHD extern real (*d_Emag_Array_G )[ CUBE(PS1) ]; #endif extern real (*d_Pot_Array_T )[ CUBE(GRA_NXT) ]; extern real *d_ExtPotTable; extern void **d_ExtPotGenePtr; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_MemFree_PoissonGravity // Description : Free the device and host memory previously allocated by CUAPI_MemAllocate_PoissonGravity() // // Parameter : None //------------------------------------------------------------------------------------------------------- void CUAPI_MemFree_PoissonGravity() { // free the device memory if ( d_Rho_Array_P != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Rho_Array_P ) ); d_Rho_Array_P = NULL; } if ( d_Pot_Array_P_In != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_P_In ) ); d_Pot_Array_P_In = NULL; } if ( d_Pot_Array_P_Out != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_P_Out ) ); d_Pot_Array_P_Out = NULL; } # ifdef UNSPLIT_GRAVITY if ( d_Pot_Array_USG_F != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_USG_F ) ); d_Pot_Array_USG_F = NULL; } if ( d_Pot_Array_USG_G != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_USG_G ) ); d_Pot_Array_USG_G = NULL; } if ( d_Flu_Array_USG_G != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_USG_G ) ); d_Flu_Array_USG_G = NULL; } # endif if ( d_Flu_Array_G != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_G ) ); d_Flu_Array_G = NULL; } if ( d_Corner_Array_PGT != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Corner_Array_PGT ) ); d_Corner_Array_PGT = NULL; } # ifdef DUAL_ENERGY if ( d_DE_Array_G != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_DE_Array_G ) ); d_DE_Array_G = NULL; } # endif # ifdef MHD if ( d_Emag_Array_G != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Emag_Array_G ) ); d_Emag_Array_G = NULL; } # endif if ( d_Pot_Array_T != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_T ) ); d_Pot_Array_T = NULL; } if ( d_ExtPotTable != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_ExtPotTable ) ); d_ExtPotTable = NULL; } if ( d_ExtPotGenePtr != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_ExtPotGenePtr ) ); d_ExtPotGenePtr = NULL; } // free the host memory allocated by CUDA for (int t=0; t<2; t++) { if ( h_Rho_Array_P [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Rho_Array_P [t] ) ); h_Rho_Array_P [t] = NULL; } if ( h_Pot_Array_P_In [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_P_In [t] ) ); h_Pot_Array_P_In [t] = NULL; } if ( h_Pot_Array_P_Out [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_P_Out [t] ) ); h_Pot_Array_P_Out [t] = NULL; } # ifdef UNSPLIT_GRAVITY if ( h_Pot_Array_USG_F [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_USG_F [t] ) ); h_Pot_Array_USG_F [t] = NULL; } if ( h_Pot_Array_USG_G [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_USG_G [t] ) ); h_Pot_Array_USG_G [t] = NULL; } if ( h_Flu_Array_USG_G [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_USG_G [t] ) ); h_Flu_Array_USG_G [t] = NULL; } # endif if ( h_Flu_Array_G 
[t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_G [t] ) ); h_Flu_Array_G [t] = NULL; } if ( h_Corner_Array_PGT[t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Corner_Array_PGT[t] ) ); h_Corner_Array_PGT[t] = NULL; } # ifdef DUAL_ENERGY if ( h_DE_Array_G [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_DE_Array_G [t] ) ); h_DE_Array_G [t] = NULL; } # endif # ifdef MHD if ( h_Emag_Array_G [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Emag_Array_G [t] ) ); h_Emag_Array_G [t] = NULL; } # endif if ( h_Pot_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_T [t] ) ); h_Pot_Array_T [t] = NULL; } } // for (int t=0; t<2; t++) if ( h_ExtPotTable != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_ExtPotTable ) ); h_ExtPotTable = NULL; } if ( h_ExtPotGenePtr != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_ExtPotGenePtr ) ); h_ExtPotGenePtr = NULL; } } // FUNCTION : CUAPI_MemFree_PoissonGravity #endif // #if ( defined GPU && defined GRAVITY )
ae629d33ff5bb54593e49a7bb352d02d2e7ebb9d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include </usr/users/hpcgif/hpcgif_9/Deep_Learning_GPU_CUDA/Eigen/Dense> #include <vector> #include <math.h> #include <random> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "device_atomic_functions.h" #include "timer.h" #include <stdio.h> #include <fstream> #include <stdlib.h> #define N 2048 #define THREADS_PER_BLOCK 512 using namespace Eigen; using namespace std; // The Cuda parallelized dot product __global__ void dot_par(float *aa, float *bb, float *cc) { int index = threadIdx.x + blockIdx.x * blockDim.x; __shared__ float temp[THREADS_PER_BLOCK]; temp[threadIdx.x] = aa[index] * bb[index]; __syncthreads(); if (0 == threadIdx.x) { float sum = 0; for (int i = 0; i < THREADS_PER_BLOCK; i++) { sum += temp[i]; } atomicAdd(cc, sum); } } // Fonction to load the data MatrixXf load_csv (const std::string & path) { std::ifstream indata; indata.open(path); std::string line; std::vector<float> values; int rows = 0; while (std::getline(indata, line)) { std::stringstream lineStream(line); std::string cell; while (std::getline(lineStream, cell, ',')) { values.push_back(std::stod(cell)); } ++rows; } return Map<const Matrix<typename MatrixXf::Scalar, MatrixXf::RowsAtCompileTime, MatrixXf::ColsAtCompileTime, RowMajor>>(values.data(), rows, values.size()/rows); } //This defines the sigmoid function MatrixXf sigmoid(MatrixXf X){ ArrayXXf expo = (-X).array().exp(); ArrayXXf result = 1 / (1 + expo); return(result.matrix()); } //This defines the sigmoid function for one point float sigmoid_i(float X){ float result = 1 / (1 + exp(-X)); return(result); } //This defines the dsigmoid function MatrixXf dsigmoid(MatrixXf X){ ArrayXXf sig = sigmoid(X).array(); ArrayXXf result = sig * (1 - sig); return(result.matrix()); } //This function defines the softmax function MatrixXf softmax(MatrixXf X){ ArrayXXf e = X.array().exp(); ArrayXf result = e / e.sum(); return (result.matrix()); } //This function initializes the coefficient void initialize(VectorXf &w, float &b, int dim){ w = ArrayXf::Random(dim).matrix(); b = 0; } // Propagation for the logistic regression void propagate(VectorXf w, float b, MatrixXf X, RowVectorXf y, VectorXf &dw, float &db, float &cost){ int m = X.cols(); MatrixXf A = sigmoid((w.transpose() * X).array() + b); cost = (-1. / m) * (((y.array() * A.array().log()) + ((1 - y.array()) * (1 - A.array()).log())).sum()); dw = (1. / m) * (X * ((A - y).transpose())); db = (1. 
/ m) * ((A - y).sum()); } // Propagation for the logistic regression for one point void propagate_i(VectorXf w, float b, VectorXf X_i, float y_i, VectorXf &dw, float &db, float &cost){ float a_i = sigmoid_i(w.dot(X_i) + b); cost = -1 * ((y_i * log(a_i)) + ((1 - y_i) * log(1 - a_i))); dw = (X_i * (a_i - y_i)); db = a_i - y_i; } // Parallelized Propagation for the logistic regression for one point void propagate_i_par(VectorXf w, float b, VectorXf X_i, float y_i, VectorXf &dw, float &db, float &cost){ float *a, *b1, *c; float *dev_a, *dev_b, *dev_c; float size = N * sizeof(float); //allocate space for the variables on the device hipMalloc(&dev_a, size); hipMalloc(&dev_b, size); hipMalloc(&dev_c, sizeof(float)); //allocate space for the variables on the host a = (float *)malloc(size); b1 = (float *)malloc(size); c = (float *)malloc(sizeof(float)); dev_a = w.transpose().data(); dev_b = X_i.data(); hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice); hipMemcpy(dev_b, b1, size, hipMemcpyHostToDevice); hipMemset(dev_c, 0.0f, sizeof(float)); hipLaunchKernelGGL(( dot_par), dim3(N), dim3(THREADS_PER_BLOCK) , 0, 0, dev_a, dev_b, dev_c); hipMemcpy(c, dev_c, sizeof(float), hipMemcpyDeviceToHost); float a_i = sigmoid_i(*c + b); free(a); free(b1); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); cost = -1 * ((y_i * log(a_i)) + ((1 - y_i) * log(1 - a_i))); dw = (X_i * (a_i - y_i)); db = a_i - y_i; } // Propagation for the logistic regression for the whole gradient void propagate_par(VectorXf w, float b, MatrixXf X, RowVectorXf y, VectorXf &dw, float &db, float &cost){ int m = X.cols(); int d = X.rows(); VectorXf dw_a(0); float db_a=0, cost_a=0; for (int i=0; i<m; i++){ propagate_i_par(w, b, X.col(i), y(i), dw, db, cost); cost_a += cost; dw_a += dw; db_a += db;} dw = (1.0 / m) * dw_a; db = (1.0 / m) * db_a; cost = (1.0 / m) * cost_a; } // Optimize function void optimize(VectorXf &w, float &b, VectorXf &dw, float &db, MatrixXf X, RowVectorXf y, int numIterations, float learningRate, vector<float> &costs, bool par=false, bool sgd=false, bool printcost=true){ int m = X.cols(); float cost; for(int j = 0; j < numIterations; j++){ if (sgd == true){ random_device rd; mt19937 gen(rd()); uniform_int_distribution<int> dis(0, m - 1); int i = dis(gen); if (par == true){ propagate_i_par(w, b, X.col(i), y(i), dw, db, cost);} else{ propagate_i(w, b, X.col(i), y(i), dw, db, cost); }} else{ if (par == true){ propagate_par(w, b, X, y, dw, db, cost);} else{ propagate(w, b, X, y, dw, db, cost); }} w = w - ((learningRate / sqrt(j + 1)) * dw); b = b - ((learningRate / sqrt(j + 1)) * db); if (j % 100 == 0){ costs.push_back(cost); } if((j % 1000) == 0 && printcost==true){ cout << "Cost after iteration " << j << ": " << cost << endl;} } } // Function used for the prediction RowVectorXf predict(VectorXf w, float b, MatrixXf X){ int m = X.cols(); RowVectorXf yPrediction(m); MatrixXf A = sigmoid((w.transpose() * X).array() + b); for(int i = 0; i < A.cols(); i++){ if(A(0, i) <= 0.5){ yPrediction(0, i) = 0; } else{ yPrediction(0, i) = 1; } } return(yPrediction); } // Main Model gathering all the functions void model(MatrixXf xTrain, RowVectorXf yTrain, MatrixXf xTest, RowVectorXf yTest, RowVectorXf &yPredictionsTrain, RowVectorXf &yPredictionsTest, VectorXf &w, float &b, std::vector<float> &costs, const int &numIterations, const float &learningRate, bool par, bool sgd){ initialize(w, b, xTrain.rows()); VectorXf dw; float db; if (par==true && sgd==true){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, 
true, true);} if (par==true && sgd==false){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, true, false);} if (par==false && sgd==true){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, false, true);} if (par==false && sgd==false){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, false, false);} yPredictionsTrain = predict(w, b, xTrain); yPredictionsTest = predict(w, b, xTest); cout << "train accuracy: " << 100 - ((yPredictionsTrain - yTrain).array().abs().sum() / float(yTrain.size())) * 100 << endl; cout << "test accuracy: " << 100 - ((yPredictionsTest - yTest).array().abs().sum() / float(yTest.size())) * 100 << endl; } int main(){ Timer Tim1, Tim2; VectorXf w, dw; float b; MatrixXf xTrain = load_csv("trainingImages.csv") / 255.0; RowVectorXf yTrain = load_csv("trainingLabels.csv"); MatrixXf xTest = load_csv("testImages.csv") / 255.0; RowVectorXf yTest = load_csv("testLabels.csv"); std::cout << "x train: " << xTrain.rows() << " " << xTrain.cols() << std::endl; std::cout << "y train: " << yTrain.rows() << " " << yTrain.cols() << std::endl; std::cout << "x test: " << xTest.rows() << " " << xTest.cols() << std::endl; std::cout << "y test: " << yTest.rows() << " " << yTest.cols() << std::endl; RowVectorXf yPredictionsTrain, yPredictionsTest; vector<float> costs; cout << "Warming the GPU..." << endl; model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 1000, 0.1, true, true); Tim1.start(); model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 10000, 0.01, true, true); Tim1.add(); cout << "With SGD GPU Time is: " << Tim1.getsum() << " seconds" << endl; Tim2.start(); model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 10000, 0.01, false, true); Tim2.add(); cout << "With SGD CPU Time is: " << Tim2.getsum() << " seconds" << endl; return(0); }
ae629d33ff5bb54593e49a7bb352d02d2e7ebb9d.cu
#include <iostream> #include </usr/users/hpcgif/hpcgif_9/Deep_Learning_GPU_CUDA/Eigen/Dense> #include <vector> #include <math.h> #include <random> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_atomic_functions.h" #include "timer.h" #include <stdio.h> #include <fstream> #include <stdlib.h> #define N 2048 #define THREADS_PER_BLOCK 512 using namespace Eigen; using namespace std; // The Cuda parallelized dot product __global__ void dot_par(float *aa, float *bb, float *cc) { int index = threadIdx.x + blockIdx.x * blockDim.x; __shared__ float temp[THREADS_PER_BLOCK]; temp[threadIdx.x] = aa[index] * bb[index]; __syncthreads(); if (0 == threadIdx.x) { float sum = 0; for (int i = 0; i < THREADS_PER_BLOCK; i++) { sum += temp[i]; } atomicAdd(cc, sum); } } // Fonction to load the data MatrixXf load_csv (const std::string & path) { std::ifstream indata; indata.open(path); std::string line; std::vector<float> values; int rows = 0; while (std::getline(indata, line)) { std::stringstream lineStream(line); std::string cell; while (std::getline(lineStream, cell, ',')) { values.push_back(std::stod(cell)); } ++rows; } return Map<const Matrix<typename MatrixXf::Scalar, MatrixXf::RowsAtCompileTime, MatrixXf::ColsAtCompileTime, RowMajor>>(values.data(), rows, values.size()/rows); } //This defines the sigmoid function MatrixXf sigmoid(MatrixXf X){ ArrayXXf expo = (-X).array().exp(); ArrayXXf result = 1 / (1 + expo); return(result.matrix()); } //This defines the sigmoid function for one point float sigmoid_i(float X){ float result = 1 / (1 + exp(-X)); return(result); } //This defines the dsigmoid function MatrixXf dsigmoid(MatrixXf X){ ArrayXXf sig = sigmoid(X).array(); ArrayXXf result = sig * (1 - sig); return(result.matrix()); } //This function defines the softmax function MatrixXf softmax(MatrixXf X){ ArrayXXf e = X.array().exp(); ArrayXf result = e / e.sum(); return (result.matrix()); } //This function initializes the coefficient void initialize(VectorXf &w, float &b, int dim){ w = ArrayXf::Random(dim).matrix(); b = 0; } // Propagation for the logistic regression void propagate(VectorXf w, float b, MatrixXf X, RowVectorXf y, VectorXf &dw, float &db, float &cost){ int m = X.cols(); MatrixXf A = sigmoid((w.transpose() * X).array() + b); cost = (-1. / m) * (((y.array() * A.array().log()) + ((1 - y.array()) * (1 - A.array()).log())).sum()); dw = (1. / m) * (X * ((A - y).transpose())); db = (1. 
/ m) * ((A - y).sum()); } // Propagation for the logistic regression for one point void propagate_i(VectorXf w, float b, VectorXf X_i, float y_i, VectorXf &dw, float &db, float &cost){ float a_i = sigmoid_i(w.dot(X_i) + b); cost = -1 * ((y_i * log(a_i)) + ((1 - y_i) * log(1 - a_i))); dw = (X_i * (a_i - y_i)); db = a_i - y_i; } // Parallelized Propagation for the logistic regression for one point void propagate_i_par(VectorXf w, float b, VectorXf X_i, float y_i, VectorXf &dw, float &db, float &cost){ float *a, *b1, *c; float *dev_a, *dev_b, *dev_c; float size = N * sizeof(float); //allocate space for the variables on the device cudaMalloc(&dev_a, size); cudaMalloc(&dev_b, size); cudaMalloc(&dev_c, sizeof(float)); //allocate space for the variables on the host a = (float *)malloc(size); b1 = (float *)malloc(size); c = (float *)malloc(sizeof(float)); dev_a = w.transpose().data(); dev_b = X_i.data(); cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b1, size, cudaMemcpyHostToDevice); cudaMemset(dev_c, 0.0f, sizeof(float)); dot_par<<< N, THREADS_PER_BLOCK >>>(dev_a, dev_b, dev_c); cudaMemcpy(c, dev_c, sizeof(float), cudaMemcpyDeviceToHost); float a_i = sigmoid_i(*c + b); free(a); free(b1); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cost = -1 * ((y_i * log(a_i)) + ((1 - y_i) * log(1 - a_i))); dw = (X_i * (a_i - y_i)); db = a_i - y_i; } // Propagation for the logistic regression for the whole gradient void propagate_par(VectorXf w, float b, MatrixXf X, RowVectorXf y, VectorXf &dw, float &db, float &cost){ int m = X.cols(); int d = X.rows(); VectorXf dw_a(0); float db_a=0, cost_a=0; for (int i=0; i<m; i++){ propagate_i_par(w, b, X.col(i), y(i), dw, db, cost); cost_a += cost; dw_a += dw; db_a += db;} dw = (1.0 / m) * dw_a; db = (1.0 / m) * db_a; cost = (1.0 / m) * cost_a; } // Optimize function void optimize(VectorXf &w, float &b, VectorXf &dw, float &db, MatrixXf X, RowVectorXf y, int numIterations, float learningRate, vector<float> &costs, bool par=false, bool sgd=false, bool printcost=true){ int m = X.cols(); float cost; for(int j = 0; j < numIterations; j++){ if (sgd == true){ random_device rd; mt19937 gen(rd()); uniform_int_distribution<int> dis(0, m - 1); int i = dis(gen); if (par == true){ propagate_i_par(w, b, X.col(i), y(i), dw, db, cost);} else{ propagate_i(w, b, X.col(i), y(i), dw, db, cost); }} else{ if (par == true){ propagate_par(w, b, X, y, dw, db, cost);} else{ propagate(w, b, X, y, dw, db, cost); }} w = w - ((learningRate / sqrt(j + 1)) * dw); b = b - ((learningRate / sqrt(j + 1)) * db); if (j % 100 == 0){ costs.push_back(cost); } if((j % 1000) == 0 && printcost==true){ cout << "Cost after iteration " << j << ": " << cost << endl;} } } // Function used for the prediction RowVectorXf predict(VectorXf w, float b, MatrixXf X){ int m = X.cols(); RowVectorXf yPrediction(m); MatrixXf A = sigmoid((w.transpose() * X).array() + b); for(int i = 0; i < A.cols(); i++){ if(A(0, i) <= 0.5){ yPrediction(0, i) = 0; } else{ yPrediction(0, i) = 1; } } return(yPrediction); } // Main Model gathering all the functions void model(MatrixXf xTrain, RowVectorXf yTrain, MatrixXf xTest, RowVectorXf yTest, RowVectorXf &yPredictionsTrain, RowVectorXf &yPredictionsTest, VectorXf &w, float &b, std::vector<float> &costs, const int &numIterations, const float &learningRate, bool par, bool sgd){ initialize(w, b, xTrain.rows()); VectorXf dw; float db; if (par==true && sgd==true){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, true, true);} if 
(par==true && sgd==false){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, true, false);} if (par==false && sgd==true){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, false, true);} if (par==false && sgd==false){ optimize(w, b, dw, db, xTrain, yTrain, numIterations, learningRate, costs, false, false);} yPredictionsTrain = predict(w, b, xTrain); yPredictionsTest = predict(w, b, xTest); cout << "train accuracy: " << 100 - ((yPredictionsTrain - yTrain).array().abs().sum() / float(yTrain.size())) * 100 << endl; cout << "test accuracy: " << 100 - ((yPredictionsTest - yTest).array().abs().sum() / float(yTest.size())) * 100 << endl; } int main(){ Timer Tim1, Tim2; VectorXf w, dw; float b; MatrixXf xTrain = load_csv("trainingImages.csv") / 255.0; RowVectorXf yTrain = load_csv("trainingLabels.csv"); MatrixXf xTest = load_csv("testImages.csv") / 255.0; RowVectorXf yTest = load_csv("testLabels.csv"); std::cout << "x train: " << xTrain.rows() << " " << xTrain.cols() << std::endl; std::cout << "y train: " << yTrain.rows() << " " << yTrain.cols() << std::endl; std::cout << "x test: " << xTest.rows() << " " << xTest.cols() << std::endl; std::cout << "y test: " << yTest.rows() << " " << yTest.cols() << std::endl; RowVectorXf yPredictionsTrain, yPredictionsTest; vector<float> costs; cout << "Warming the GPU..." << endl; model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 1000, 0.1, true, true); Tim1.start(); model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 10000, 0.01, true, true); Tim1.add(); cout << "With SGD GPU Time is: " << Tim1.getsum() << " seconds" << endl; Tim2.start(); model(xTrain, yTrain, xTest, yTest, yPredictionsTrain, yPredictionsTest, w, b, costs, 10000, 0.01, false, true); Tim2.add(); cout << "With SGD CPU Time is: " << Tim2.getsum() << " seconds" << endl; return(0); }
87f164ba0914be59560cc682a91a3befceb1591d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <cassert> #include <chrono> #include <cstdio> #include <hipcub/hipcub.hpp> #include "../common.h" #include "../constant.h" #include "../device.h" #include "../logging.h" #include "../profiler.h" #include "../timer.h" #include "cuda_function.h" #include "cuda_utils.h" namespace samgraph { namespace common { namespace cuda { namespace { __device__ bool insert_hash_table(IdType* hash_table, IdType* round_id_table, IdType val, IdType round_id) { IdType pos = val % 50; IdType gap = 1; while (true) { if (round_id_table[pos] != round_id) { round_id_table[pos] = round_id; hash_table[pos] = val; return true; } else if (hash_table[pos] == val) { return false; } else { pos = (pos + gap) % 50; gap ++; } } } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void sample_weighted_khop_hash_dedup(const IdType *indptr, const IdType *indices, const float *prob_table, const IdType *alias_table, const IdType *input, const size_t num_input, const size_t fanout, IdType *tmp_src, IdType *tmp_dst, hiprandState_t *random_states, size_t num_random_states) { assert(BLOCK_SIZE == blockDim.x); const size_t block_start = TILE_SIZE * blockIdx.x; const size_t block_end = TILE_SIZE * (blockIdx.x + 1); size_t i = blockDim.x * blockIdx.x + threadIdx.x; assert(i < num_random_states); hiprandState_t local_state = random_states[i]; IdType hash_table[100]; for (int i = 0; i < 100; i++) { hash_table[i] = Constant::kEmptyKey; } IdType* round_id_table = hash_table + 50; for (size_t index = threadIdx.x + block_start; index < block_end; index += BLOCK_SIZE) { if (index < num_input) { const IdType rid = input[index]; const IdType off = indptr[rid]; const IdType len = indptr[rid + 1] - indptr[rid]; if (len <= fanout) { size_t j = 0; for (; j < len; ++j) { tmp_src[index * fanout + j] = rid; tmp_dst[index * fanout + j] = indices[off + j]; } for (; j < fanout; ++j) { tmp_src[index * fanout + j] = Constant::kEmptyKey; tmp_dst[index * fanout + j] = Constant::kEmptyKey; } } else { size_t selected_unique = 0; for (; selected_unique < fanout;) { size_t k = hiprand(&local_state) % len; float r = hiprand_uniform(&local_state); IdType rand_result = indices[off + k]; if (r > prob_table[off + k]) { rand_result = alias_table[off + k]; } if (insert_hash_table(hash_table, round_id_table, rand_result, rid) == false) continue; tmp_src[index * fanout + selected_unique] = rid; tmp_dst[index * fanout + selected_unique] = rand_result; selected_unique++; } } } } random_states[i] = local_state; } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void count_edge(IdType *edge_src, size_t *item_prefix, const size_t num_input, const size_t fanout) { assert(BLOCK_SIZE == blockDim.x); using BlockReduce = typename hipcub::BlockReduce<size_t, BLOCK_SIZE>; 
const size_t block_start = TILE_SIZE * blockIdx.x; const size_t block_end = TILE_SIZE * (blockIdx.x + 1); size_t count = 0; #pragma unroll for (size_t index = threadIdx.x + block_start; index < block_end; index += BLOCK_SIZE) { if (index < num_input) { for (size_t j = 0; j < fanout; j++) { if (edge_src[index * fanout + j] != Constant::kEmptyKey) { ++count; } } } } __shared__ typename BlockReduce::TempStorage temp_space; count = BlockReduce(temp_space).Sum(count); if (threadIdx.x == 0) { item_prefix[blockIdx.x] = count; if (blockIdx.x == 0) { item_prefix[gridDim.x] = 0; } } } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void compact_edge(const IdType *tmp_src, const IdType *tmp_dst, IdType *out_src, IdType *out_dst, size_t *num_out, const size_t *item_prefix, const size_t num_input, const size_t fanout) { assert(BLOCK_SIZE == blockDim.x); using BlockScan = typename hipcub::BlockScan<size_t, BLOCK_SIZE>; constexpr const size_t VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE; __shared__ typename BlockScan::TempStorage temp_space; const size_t offset = item_prefix[blockIdx.x]; BlockPrefixCallbackOp<size_t> prefix_op(0); // count successful placements for (size_t i = 0; i < VALS_PER_THREAD; ++i) { const size_t index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE; size_t item_per_thread = 0; if (index < num_input) { for (size_t j = 0; j < fanout; j++) { if (tmp_src[index * fanout + j] != Constant::kEmptyKey) { item_per_thread++; } } } size_t item_prefix_per_thread = item_per_thread; BlockScan(temp_space) .ExclusiveSum(item_prefix_per_thread, item_prefix_per_thread, prefix_op); __syncthreads(); for (size_t j = 0; j < item_per_thread; j++) { out_src[offset + item_prefix_per_thread + j] = tmp_src[index * fanout + j]; out_dst[offset + item_prefix_per_thread + j] = tmp_dst[index * fanout + j]; } } if (threadIdx.x == 0 && blockIdx.x == 0) { *num_out = item_prefix[gridDim.x]; } } } // namespace void GPUSampleWeightedKHopHashDedup(const IdType *indptr, const IdType *indices, const float *prob_table, const IdType *alias_table, const IdType *input, const size_t num_input, const size_t fanout, IdType *out_src, IdType *out_dst, size_t *num_out, Context ctx, StreamHandle stream, GPURandomStates *random_states, uint64_t task_key) { LOG(DEBUG) << "GPUSample: begin with num_input " << num_input << " and fanout " << fanout; Timer t0; const size_t num_tiles = RoundUpDiv(num_input, Constant::kCudaTileSize); const dim3 grid(num_tiles); const dim3 block(Constant::kCudaBlockSize); auto sampler_device = Device::Get(ctx); auto cu_stream = static_cast<hipStream_t>(stream); IdType *tmp_src = static_cast<IdType *>( sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_input * fanout)); IdType *tmp_dst = static_cast<IdType *>( sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_input * fanout)); LOG(DEBUG) << "GPUSample: cuda tmp_src malloc " << ToReadableSize(num_input * fanout * sizeof(IdType)); LOG(DEBUG) << "GPUSample: cuda tmp_dst malloc " << ToReadableSize(num_input * fanout * sizeof(IdType)); hipLaunchKernelGGL(( sample_weighted_khop_hash_dedup<Constant::kCudaBlockSize, Constant::kCudaTileSize>) , dim3(grid), dim3(block), 0, cu_stream, indptr, indices, prob_table, alias_table, input, num_input, fanout, tmp_src, tmp_dst, random_states->GetStates(), random_states->NumStates()); sampler_device->StreamSync(ctx, stream); double sample_time = t0.Passed(); Timer t1; size_t *item_prefix = static_cast<size_t *>( sampler_device->AllocWorkspace(ctx, sizeof(size_t) * (grid.x + 1))); LOG(DEBUG) << 
"GPUSample: cuda item_prefix malloc " << ToReadableSize(sizeof(size_t) * (grid.x + 1)); hipLaunchKernelGGL(( count_edge<Constant::kCudaBlockSize, Constant::kCudaTileSize>) , dim3(grid), dim3(block), 0, cu_stream, tmp_src, item_prefix, num_input, fanout); sampler_device->StreamSync(ctx, stream); double count_edge_time = t1.Passed(); Timer t2; size_t workspace_bytes; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum( nullptr, workspace_bytes, static_cast<size_t *>(nullptr), static_cast<size_t *>(nullptr), grid.x + 1, cu_stream)); sampler_device->StreamSync(ctx, stream); void *workspace = sampler_device->AllocWorkspace(ctx, workspace_bytes); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(workspace, workspace_bytes, item_prefix, item_prefix, grid.x + 1, cu_stream)); sampler_device->StreamSync(ctx, stream); LOG(DEBUG) << "GPUSample: cuda workspace malloc " << ToReadableSize(workspace_bytes); hipLaunchKernelGGL(( compact_edge<Constant::kCudaBlockSize, Constant::kCudaTileSize>) , dim3(grid), dim3(block), 0, cu_stream, tmp_src, tmp_dst, out_src, out_dst, num_out, item_prefix, num_input, fanout); sampler_device->StreamSync(ctx, stream); double compact_edge_time = t2.Passed(); sampler_device->FreeWorkspace(ctx, workspace); sampler_device->FreeWorkspace(ctx, item_prefix); sampler_device->FreeWorkspace(ctx, tmp_src); sampler_device->FreeWorkspace(ctx, tmp_dst); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCooTime, sample_time); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCountEdgeTime, count_edge_time); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCompactEdgesTime, compact_edge_time); LOG(DEBUG) << "GPUSample: succeed "; } } // namespace cuda } // namespace common } // namespace samgraph
87f164ba0914be59560cc682a91a3befceb1591d.cu
/* * Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <curand.h> #include <curand_kernel.h> #include <cassert> #include <chrono> #include <cstdio> #include <cub/cub.cuh> #include "../common.h" #include "../constant.h" #include "../device.h" #include "../logging.h" #include "../profiler.h" #include "../timer.h" #include "cuda_function.h" #include "cuda_utils.h" namespace samgraph { namespace common { namespace cuda { namespace { __device__ bool insert_hash_table(IdType* hash_table, IdType* round_id_table, IdType val, IdType round_id) { IdType pos = val % 50; IdType gap = 1; while (true) { if (round_id_table[pos] != round_id) { round_id_table[pos] = round_id; hash_table[pos] = val; return true; } else if (hash_table[pos] == val) { return false; } else { pos = (pos + gap) % 50; gap ++; } } } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void sample_weighted_khop_hash_dedup(const IdType *indptr, const IdType *indices, const float *prob_table, const IdType *alias_table, const IdType *input, const size_t num_input, const size_t fanout, IdType *tmp_src, IdType *tmp_dst, curandState *random_states, size_t num_random_states) { assert(BLOCK_SIZE == blockDim.x); const size_t block_start = TILE_SIZE * blockIdx.x; const size_t block_end = TILE_SIZE * (blockIdx.x + 1); size_t i = blockDim.x * blockIdx.x + threadIdx.x; assert(i < num_random_states); curandState local_state = random_states[i]; IdType hash_table[100]; for (int i = 0; i < 100; i++) { hash_table[i] = Constant::kEmptyKey; } IdType* round_id_table = hash_table + 50; for (size_t index = threadIdx.x + block_start; index < block_end; index += BLOCK_SIZE) { if (index < num_input) { const IdType rid = input[index]; const IdType off = indptr[rid]; const IdType len = indptr[rid + 1] - indptr[rid]; if (len <= fanout) { size_t j = 0; for (; j < len; ++j) { tmp_src[index * fanout + j] = rid; tmp_dst[index * fanout + j] = indices[off + j]; } for (; j < fanout; ++j) { tmp_src[index * fanout + j] = Constant::kEmptyKey; tmp_dst[index * fanout + j] = Constant::kEmptyKey; } } else { size_t selected_unique = 0; for (; selected_unique < fanout;) { size_t k = curand(&local_state) % len; float r = curand_uniform(&local_state); IdType rand_result = indices[off + k]; if (r > prob_table[off + k]) { rand_result = alias_table[off + k]; } if (insert_hash_table(hash_table, round_id_table, rand_result, rid) == false) continue; tmp_src[index * fanout + selected_unique] = rid; tmp_dst[index * fanout + selected_unique] = rand_result; selected_unique++; } } } } random_states[i] = local_state; } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void count_edge(IdType *edge_src, size_t *item_prefix, const size_t num_input, const size_t fanout) { assert(BLOCK_SIZE == blockDim.x); using BlockReduce = typename cub::BlockReduce<size_t, BLOCK_SIZE>; const size_t block_start = TILE_SIZE * blockIdx.x; const size_t block_end = TILE_SIZE * (blockIdx.x + 1); size_t count = 0; 
#pragma unroll for (size_t index = threadIdx.x + block_start; index < block_end; index += BLOCK_SIZE) { if (index < num_input) { for (size_t j = 0; j < fanout; j++) { if (edge_src[index * fanout + j] != Constant::kEmptyKey) { ++count; } } } } __shared__ typename BlockReduce::TempStorage temp_space; count = BlockReduce(temp_space).Sum(count); if (threadIdx.x == 0) { item_prefix[blockIdx.x] = count; if (blockIdx.x == 0) { item_prefix[gridDim.x] = 0; } } } template <size_t BLOCK_SIZE, size_t TILE_SIZE> __global__ void compact_edge(const IdType *tmp_src, const IdType *tmp_dst, IdType *out_src, IdType *out_dst, size_t *num_out, const size_t *item_prefix, const size_t num_input, const size_t fanout) { assert(BLOCK_SIZE == blockDim.x); using BlockScan = typename cub::BlockScan<size_t, BLOCK_SIZE>; constexpr const size_t VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE; __shared__ typename BlockScan::TempStorage temp_space; const size_t offset = item_prefix[blockIdx.x]; BlockPrefixCallbackOp<size_t> prefix_op(0); // count successful placements for (size_t i = 0; i < VALS_PER_THREAD; ++i) { const size_t index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE; size_t item_per_thread = 0; if (index < num_input) { for (size_t j = 0; j < fanout; j++) { if (tmp_src[index * fanout + j] != Constant::kEmptyKey) { item_per_thread++; } } } size_t item_prefix_per_thread = item_per_thread; BlockScan(temp_space) .ExclusiveSum(item_prefix_per_thread, item_prefix_per_thread, prefix_op); __syncthreads(); for (size_t j = 0; j < item_per_thread; j++) { out_src[offset + item_prefix_per_thread + j] = tmp_src[index * fanout + j]; out_dst[offset + item_prefix_per_thread + j] = tmp_dst[index * fanout + j]; } } if (threadIdx.x == 0 && blockIdx.x == 0) { *num_out = item_prefix[gridDim.x]; } } } // namespace void GPUSampleWeightedKHopHashDedup(const IdType *indptr, const IdType *indices, const float *prob_table, const IdType *alias_table, const IdType *input, const size_t num_input, const size_t fanout, IdType *out_src, IdType *out_dst, size_t *num_out, Context ctx, StreamHandle stream, GPURandomStates *random_states, uint64_t task_key) { LOG(DEBUG) << "GPUSample: begin with num_input " << num_input << " and fanout " << fanout; Timer t0; const size_t num_tiles = RoundUpDiv(num_input, Constant::kCudaTileSize); const dim3 grid(num_tiles); const dim3 block(Constant::kCudaBlockSize); auto sampler_device = Device::Get(ctx); auto cu_stream = static_cast<cudaStream_t>(stream); IdType *tmp_src = static_cast<IdType *>( sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_input * fanout)); IdType *tmp_dst = static_cast<IdType *>( sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_input * fanout)); LOG(DEBUG) << "GPUSample: cuda tmp_src malloc " << ToReadableSize(num_input * fanout * sizeof(IdType)); LOG(DEBUG) << "GPUSample: cuda tmp_dst malloc " << ToReadableSize(num_input * fanout * sizeof(IdType)); sample_weighted_khop_hash_dedup<Constant::kCudaBlockSize, Constant::kCudaTileSize> <<<grid, block, 0, cu_stream>>>( indptr, indices, prob_table, alias_table, input, num_input, fanout, tmp_src, tmp_dst, random_states->GetStates(), random_states->NumStates()); sampler_device->StreamSync(ctx, stream); double sample_time = t0.Passed(); Timer t1; size_t *item_prefix = static_cast<size_t *>( sampler_device->AllocWorkspace(ctx, sizeof(size_t) * (grid.x + 1))); LOG(DEBUG) << "GPUSample: cuda item_prefix malloc " << ToReadableSize(sizeof(size_t) * (grid.x + 1)); count_edge<Constant::kCudaBlockSize, Constant::kCudaTileSize> <<<grid, 
block, 0, cu_stream>>>(tmp_src, item_prefix, num_input, fanout); sampler_device->StreamSync(ctx, stream); double count_edge_time = t1.Passed(); Timer t2; size_t workspace_bytes; CUDA_CALL(cub::DeviceScan::ExclusiveSum( nullptr, workspace_bytes, static_cast<size_t *>(nullptr), static_cast<size_t *>(nullptr), grid.x + 1, cu_stream)); sampler_device->StreamSync(ctx, stream); void *workspace = sampler_device->AllocWorkspace(ctx, workspace_bytes); CUDA_CALL(cub::DeviceScan::ExclusiveSum(workspace, workspace_bytes, item_prefix, item_prefix, grid.x + 1, cu_stream)); sampler_device->StreamSync(ctx, stream); LOG(DEBUG) << "GPUSample: cuda workspace malloc " << ToReadableSize(workspace_bytes); compact_edge<Constant::kCudaBlockSize, Constant::kCudaTileSize> <<<grid, block, 0, cu_stream>>>(tmp_src, tmp_dst, out_src, out_dst, num_out, item_prefix, num_input, fanout); sampler_device->StreamSync(ctx, stream); double compact_edge_time = t2.Passed(); sampler_device->FreeWorkspace(ctx, workspace); sampler_device->FreeWorkspace(ctx, item_prefix); sampler_device->FreeWorkspace(ctx, tmp_src); sampler_device->FreeWorkspace(ctx, tmp_dst); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCooTime, sample_time); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCountEdgeTime, count_edge_time); Profiler::Get().LogStepAdd(task_key, kLogL3KHopSampleCompactEdgesTime, compact_edge_time); LOG(DEBUG) << "GPUSample: succeed "; } } // namespace cuda } // namespace common } // namespace samgraph
1c66537f7864f6a3552bcfa4106ae1cf7b54899b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater_hip.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ hipError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_zero_velocities_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, N); // this method always succeds. If you had a cuda* call in this driver, you could return its error code if not // hipSuccess return hipSuccess; }
1c66537f7864f6a3552bcfa4106ae1cf7b54899b.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ cudaError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_zero_velocities_kernel<<< grid, threads >>>(d_vel, N); // this method always succeds. If you had a cuda* call in this driver, you could return its error code if not // cudaSuccess return cudaSuccess; }
e889eb550d958504aaf8e91e79a647fc02564d98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/TensorUtils.h" #include "ATen/core/Error.h" #include "ATen/hip/HIPContext.h" #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> namespace at { namespace native { namespace { // Block size for weight_norm_*_first_dim_kernel. // Currently, kernels are non-persistent. // Dialing up the block size to, say 1024, can improve performance by // increase the amount of cache available per block, which can improve cache hit rate. // However, this is less efficient for short rows. 256 is pretty versatile. // May be worth implementing heuristics later. #define BLOCK 256 // Block size for weight_norm_*_last_dim_kernel. // This is tricker than the first_dim case because we must make blocks // at least 16 fast elements wide to ensure fully-coalesced half-precision accesses. // Since output-element parallelism is along the fast dimension, this reduces the number of // blocks we can launch by 16X. #define TILE_W 16 // Somewhat versatile strategy: max out intra-block parallelism by extending // blocks across the slow dimension up to the hardware-max block size of 1024. #define TILE_H 64 template<typename T, typename ReduceOp> __device__ __forceinline__ void reduce_block_into_lanes (T *x, T val, int lanes, // lanes is intended to be <= 32. ReduceOp reduceOp) { int tid = threadIdx.x + threadIdx.y*blockDim.x; int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32. if(blockSize >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for(int i = (blockSize >> 1); i >= 64; i >>= 1) { if(tid < i) x[tid] = reduceOp(x[tid], x[tid+i]); __syncthreads(); } if(tid < 32) { T final; if(blockSize >= 64) final = reduceOp(x[tid], x[tid+32]); else final = val; // __SYNCWARP(); #pragma unroll for(int i = 16; i >= lanes; i >>= 1) final = reduceOp(final, WARP_SHFL_DOWN(final, i)); if(tid < lanes) x[tid] = final; // EpilogueOp } // Make sure the smem result is visible to all warps. __syncthreads(); } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_fwd_first_dim_kernel (scalar_t* __restrict__ w, accscalar_t* __restrict__ norms, const scalar_t* __restrict__ v, const scalar_t* __restrict__ g, const int rowSize) { // We are norming each slowest-dim row of the tensor separately. // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]); thread_sum += val_f*val_f; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; result = sqrtf(result); if(tid == 0) norms[row] = result; // Broadcast load, could use shared memory instead. 
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]); accscalar_t rnorm = 1.f/result; // for consistency with backward kernel // Write data to output for(int i = tid; i < rowSize; i += stride ) { accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]); w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_fwd_last_dim_kernel ( scalar_t* __restrict__ w, accscalar_t* __restrict__ norms, const scalar_t* __restrict__ v, const scalar_t* __restrict__ g, const int fast_dim_size, const int slower_dims_size ) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* alloc = (accscalar_t*)buf; accscalar_t* s = &alloc[0]; accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y]; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]); thread_sum += val_f*val_f; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); // Better to pass an EpilogueOp to reduce_block_into_lanes? if(threadIdx.y == 0) { accscalar_t result = s[threadIdx.x]; accscalar_t norm_this_col = sqrtf(result); norms[fast_dim_location] = norm_this_col; rnorms_this_block[threadIdx.x] = 1.f/norm_this_col; } __syncthreads(); accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]); accscalar_t rnorm = rnorms_this_block[threadIdx.x]; slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]); w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_first_dim_kernel (scalar_t* __restrict__ grad_v, scalar_t* __restrict__ grad_g, const scalar_t* __restrict__ grad_w, const scalar_t* __restrict__ saved_v, const scalar_t* __restrict__ saved_g, const accscalar_t* __restrict__ saved_norms, const int rowSize) { // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]); accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]); thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; // Could choose to save reciprocal of norm instead I suppose, but norms is probably // more handy to keep around. 
// Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/saved_norms[row]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(tid == 0) grad_g[row] = scalar_cast<scalar_t>(result*rnorm); // Broadcast load, could use shared memory instead. accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]); // Write v gradients. We are reusing values that were loaded earlier, so there // is an optimization opportunity here (store values persistently). for(int j = tid; j < rowSize; j += stride ) { accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]); accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]); accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result); grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_last_dim_kernel (scalar_t* __restrict__ grad_v, scalar_t* __restrict__ grad_g, const scalar_t* __restrict__ grad_w, const scalar_t* __restrict__ saved_v, const scalar_t* __restrict__ saved_g, const accscalar_t* __restrict__ saved_norms, const int fast_dim_size, const int slower_dims_size) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]); accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]); thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); accscalar_t result = s[threadIdx.x]; // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/saved_norms[fast_dim_location]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(threadIdx.y == 0) grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm); // Entire block pulls these values, could use shared memory instead. accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]); // Write v gradients. slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]); accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]); accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result); grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } } // anonymous namespace std::tuple<Tensor,Tensor> weight_norm_cuda (const Tensor & v, const Tensor & g, int64_t dim) { auto w = at::empty_like(v); // weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp // sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here. // norms is only needed to stash for backward. // g.type().scalarType() may be at::ScalarType::Double, Float, or Half. // If Half, stash norms as float. at::ScalarType AccType = g.type().scalarType() == at::ScalarType::Half ? 
at::ScalarType::Float : g.type().scalarType(); // Will this create norms on the same device as g, regardless of what the thread's default // current device is? I believe so, because Type::* functions are DeviceGuard()ed. auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType)); const int ndims = v.dim(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= v.size(i); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (v.type(), "weight_norm_fwd_first_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>) , dim3(v.size(0)), dim3(BLOCK), BLOCK*sizeof(accscalar_t), stream, w.data<scalar_t>(), norms.data<accscalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= v.size(i); int fast_dim_size = v.size(ndims-1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (v.type(), "weight_norm_fwd_last_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>) , dim3((fast_dim_size+TILE_W-1)/TILE_W), dim3(dim3(TILE_W,TILE_H)), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream, w.data<scalar_t>(), norms.data<accscalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), fast_dim_size, slower_dims_size); }); } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(hipGetLastError()); return std::tuple<Tensor, Tensor>{w, norms}; } std::tuple<Tensor, Tensor> weight_norm_cuda_backward (const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) { // These checks should always succeed, because weight_norm_fused_backward should only // ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g. 
AT_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous"); AT_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous"); AT_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous"); AT_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim") auto grad_v = at::empty_like(saved_v); auto grad_g = at::empty_like(saved_g); const int ndims = saved_v.dim(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= saved_v.size(i); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (saved_v.type(), "weight_norm_bwd_first_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>) , dim3(grad_w.size(0)), dim3( BLOCK), BLOCK*sizeof(accscalar_t), stream, grad_v.data<scalar_t>(), grad_g.data<scalar_t>(), grad_w.data<scalar_t>(), saved_v.data<scalar_t>(), saved_g.data<scalar_t>(), saved_norms.data<accscalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array. int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= saved_v.size(i); int fast_dim_size = saved_v.size(ndims-1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (saved_v.type(), "weight_norm_bwd_last_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>) , dim3((fast_dim_size+TILE_W-1)/TILE_W), dim3(dim3(TILE_W,TILE_H)), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream, grad_v.data<scalar_t>(), grad_g.data<scalar_t>(), grad_w.data<scalar_t>(), saved_v.data<scalar_t>(), saved_g.data<scalar_t>(), saved_norms.data<accscalar_t>(), fast_dim_size, slower_dims_size); }); } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(hipGetLastError()); return std::tuple<Tensor, Tensor>{grad_v, grad_g}; } #undef BLOCK #undef TILE_W #undef TILE_H } // namespace native } // namespace at
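The .hip half of this pair launches its kernels through HIP's hipLaunchKernelGGL macro, which is how hipify rewrites CUDA's triple-chevron syntax: the macro takes the kernel, the grid and block dimensions, the dynamic shared-memory byte count, and the stream, followed by the kernel arguments. A minimal sketch of the correspondence, using a hypothetical kernel my_kernel that stands in for the weight-norm kernels above:

// Hypothetical kernel, used only to illustrate the launch-syntax mapping.
__global__ void my_kernel(float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 0.f;
}

void launch_example(float* out, int n, cudaStream_t stream)
{
    dim3 grid(128), block(256);
    size_t shmem = 256 * sizeof(float);   // dynamic shared memory, as in BLOCK*sizeof(accscalar_t)

    // CUDA form, as written in the matching .cu file further down:
    my_kernel<<<grid, block, shmem, stream>>>(out, n);

    // HIP form produced by hipify for the same launch:
    //   hipLaunchKernelGGL(my_kernel, grid, block, shmem, stream, out, n);
    // The doubled parentheses around the kernel name seen above, e.g.
    // hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>), ...),
    // are an artifact of the automatic rewrite and are harmless.
}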
e889eb550d958504aaf8e91e79a647fc02564d98.cu
#include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/TensorUtils.h" #include "ATen/core/Error.h" #include "ATen/cuda/CUDAContext.h" #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> namespace at { namespace native { namespace { // Block size for weight_norm_*_first_dim_kernel. // Currently, kernels are non-persistent. // Dialing up the block size to, say 1024, can improve performance by // increase the amount of cache available per block, which can improve cache hit rate. // However, this is less efficient for short rows. 256 is pretty versatile. // May be worth implementing heuristics later. #define BLOCK 256 // Block size for weight_norm_*_last_dim_kernel. // This is tricker than the first_dim case because we must make blocks // at least 16 fast elements wide to ensure fully-coalesced half-precision accesses. // Since output-element parallelism is along the fast dimension, this reduces the number of // blocks we can launch by 16X. #define TILE_W 16 // Somewhat versatile strategy: max out intra-block parallelism by extending // blocks across the slow dimension up to the hardware-max block size of 1024. #define TILE_H 64 template<typename T, typename ReduceOp> __device__ __forceinline__ void reduce_block_into_lanes (T *x, T val, int lanes, // lanes is intended to be <= 32. ReduceOp reduceOp) { int tid = threadIdx.x + threadIdx.y*blockDim.x; int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32. if(blockSize >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for(int i = (blockSize >> 1); i >= 64; i >>= 1) { if(tid < i) x[tid] = reduceOp(x[tid], x[tid+i]); __syncthreads(); } if(tid < 32) { T final; if(blockSize >= 64) final = reduceOp(x[tid], x[tid+32]); else final = val; // __SYNCWARP(); #pragma unroll for(int i = 16; i >= lanes; i >>= 1) final = reduceOp(final, WARP_SHFL_DOWN(final, i)); if(tid < lanes) x[tid] = final; // EpilogueOp } // Make sure the smem result is visible to all warps. __syncthreads(); } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_fwd_first_dim_kernel (scalar_t* __restrict__ w, accscalar_t* __restrict__ norms, const scalar_t* __restrict__ v, const scalar_t* __restrict__ g, const int rowSize) { // We are norming each slowest-dim row of the tensor separately. // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]); thread_sum += val_f*val_f; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; result = sqrtf(result); if(tid == 0) norms[row] = result; // Broadcast load, could use shared memory instead. 
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]); accscalar_t rnorm = 1.f/result; // for consistency with backward kernel // Write data to output for(int i = tid; i < rowSize; i += stride ) { accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]); w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_fwd_last_dim_kernel ( scalar_t* __restrict__ w, accscalar_t* __restrict__ norms, const scalar_t* __restrict__ v, const scalar_t* __restrict__ g, const int fast_dim_size, const int slower_dims_size ) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* alloc = (accscalar_t*)buf; accscalar_t* s = &alloc[0]; accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y]; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]); thread_sum += val_f*val_f; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); // Better to pass an EpilogueOp to reduce_block_into_lanes? if(threadIdx.y == 0) { accscalar_t result = s[threadIdx.x]; accscalar_t norm_this_col = sqrtf(result); norms[fast_dim_location] = norm_this_col; rnorms_this_block[threadIdx.x] = 1.f/norm_this_col; } __syncthreads(); accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]); accscalar_t rnorm = rnorms_this_block[threadIdx.x]; slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]); w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_first_dim_kernel (scalar_t* __restrict__ grad_v, scalar_t* __restrict__ grad_g, const scalar_t* __restrict__ grad_w, const scalar_t* __restrict__ saved_v, const scalar_t* __restrict__ saved_g, const accscalar_t* __restrict__ saved_norms, const int rowSize) { // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]); accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]); thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; // Could choose to save reciprocal of norm instead I suppose, but norms is probably // more handy to keep around. 
// Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/saved_norms[row]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(tid == 0) grad_g[row] = scalar_cast<scalar_t>(result*rnorm); // Broadcast load, could use shared memory instead. accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]); // Write v gradients. We are reusing values that were loaded earlier, so there // is an optimization opportunity here (store values persistently). for(int j = tid; j < rowSize; j += stride ) { accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]); accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]); accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result); grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_last_dim_kernel (scalar_t* __restrict__ grad_v, scalar_t* __restrict__ grad_g, const scalar_t* __restrict__ grad_w, const scalar_t* __restrict__ saved_v, const scalar_t* __restrict__ saved_g, const accscalar_t* __restrict__ saved_norms, const int fast_dim_size, const int slower_dims_size) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]); accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]); thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); accscalar_t result = s[threadIdx.x]; // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/saved_norms[fast_dim_location]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(threadIdx.y == 0) grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm); // Entire block pulls these values, could use shared memory instead. accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]); // Write v gradients. slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]); accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]); accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result); grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } } // anonymous namespace std::tuple<Tensor,Tensor> weight_norm_cuda (const Tensor & v, const Tensor & g, int64_t dim) { auto w = at::empty_like(v); // weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp // sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here. // norms is only needed to stash for backward. // g.type().scalarType() may be at::ScalarType::Double, Float, or Half. // If Half, stash norms as float. at::ScalarType AccType = g.type().scalarType() == at::ScalarType::Half ? 
at::ScalarType::Float : g.type().scalarType(); // Will this create norms on the same device as g, regardless of what the thread's default // current device is? I believe so, because Type::* functions are DeviceGuard()ed. auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType)); const int ndims = v.dim(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= v.size(i); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (v.type(), "weight_norm_fwd_first_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t> <<<v.size(0), BLOCK, BLOCK*sizeof(accscalar_t), stream>>> (w.data<scalar_t>(), norms.data<accscalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= v.size(i); int fast_dim_size = v.size(ndims-1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (v.type(), "weight_norm_fwd_last_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t> <<<(fast_dim_size+TILE_W-1)/TILE_W, dim3(TILE_W,TILE_H), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream>>> (w.data<scalar_t>(), norms.data<accscalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), fast_dim_size, slower_dims_size); }); } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(cudaGetLastError()); return std::tuple<Tensor, Tensor>{w, norms}; } std::tuple<Tensor, Tensor> weight_norm_cuda_backward (const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) { // These checks should always succeed, because weight_norm_fused_backward should only // ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g. AT_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous"); AT_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous"); AT_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous"); AT_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim") auto grad_v = at::empty_like(saved_v); auto grad_g = at::empty_like(saved_g); const int ndims = saved_v.dim(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= saved_v.size(i); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (saved_v.type(), "weight_norm_bwd_first_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t> <<<grad_w.size(0), BLOCK, BLOCK*sizeof(accscalar_t), stream>>> (grad_v.data<scalar_t>(), grad_g.data<scalar_t>(), grad_w.data<scalar_t>(), saved_v.data<scalar_t>(), saved_g.data<scalar_t>(), saved_norms.data<accscalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array. 
int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= saved_v.size(i); int fast_dim_size = saved_v.size(ndims-1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (saved_v.type(), "weight_norm_bwd_last_dim_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t> <<<(fast_dim_size+TILE_W-1)/TILE_W, dim3(TILE_W,TILE_H), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream>>> (grad_v.data<scalar_t>(), grad_g.data<scalar_t>(), grad_w.data<scalar_t>(), saved_v.data<scalar_t>(), saved_g.data<scalar_t>(), saved_norms.data<accscalar_t>(), fast_dim_size, slower_dims_size); }); } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(cudaGetLastError()); return std::tuple<Tensor, Tensor>{grad_v, grad_g}; } #undef BLOCK #undef TILE_W #undef TILE_H } // namespace native } // namespace at
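For reference, the forward path above implements weight normalization, w = g * v / ||v||, with the norm taken over every dimension except dim; the first-dim kernels therefore reduce each flattened row of v, stash ||v_row|| in norms, and scale the row by g[row] / ||v_row||. A minimal host-side sketch of that computation for the dim == 0, float case — a reference for checking the kernel's output, not the library's API, and it assumes contiguous buffers:

#include <cmath>
#include <cstdint>

// CPU reference for the dim == 0 forward path (float only, contiguous data).
// rows is v.size(0); rowSize is the product of the remaining dimensions,
// matching the rowSize computed in weight_norm_cuda above.
void weight_norm_first_dim_reference(const float* v, const float* g,
                                     float* w, float* norms,
                                     int64_t rows, int64_t rowSize)
{
    for (int64_t r = 0; r < rows; ++r) {
        double sum = 0.0;                                  // accumulate in double for stability
        for (int64_t i = 0; i < rowSize; ++i) {
            double x = v[r * rowSize + i];
            sum += x * x;
        }
        float norm = (float)std::sqrt(sum);                // what the kernel writes to norms[r]
        norms[r] = norm;
        float scale = g[r] / norm;                         // g_this_row * rnorm in the kernel
        for (int64_t i = 0; i < rowSize; ++i)
            w[r * rowSize + i] = v[r * rowSize + i] * scale;
    }
}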
6f485a0be01914ffe52a859eb42ddb5681ca17a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file defines a CUDA inorder walk microbenchmark, which traverses an // array in order. This will print the times for each *block* in each // kernel invocation. Specify the -zc command-line argument to use zero-copy // memory, and -mm for managed memory. // // Usage: ./inorder_walk [-zc|-mm] #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> // The seed to use for shuffling the array. #define RANDOM_SEED (1337) // The length, in number of 32-bit integers, of the array to walk #define ARRAY_LENGTH (1024 * 1024 * 64) // The number of steps in the random walk will equal these two values // multiplied together. #define STEPS_MAJOR (1) #define STEPS_MINOR (ARRAY_LENGTH / 262) // The total number of kernel invocations (measurement iterations) to perform. #define KERNEL_INVOCATIONS (10000) // These microbenchmarks will use 2 blocks of 32 threads. #define THREAD_COUNT (32) #define BLOCK_COUNT (2) // A macro which exits the program with an error message if the given value is // not equal to hipSuccess. #define CheckError(val) CheckCUDAError( (val), #val, __FILE__, __LINE__ ) static void CheckCUDAError(hipError_t value, const char *function, const char *filename, int line) { if (value == hipSuccess) return; printf("Cuda error %d. File %s, line %d: %s\n", (int) value, filename, line, function); exit(1); } // Holds variables and pointers that are passed between the phases of the // experiment. The times arrays hold the start and end time stamps for each // block, in the order [block1_start, block1_end, block2_start, ...]. typedef struct { uint64_t *device_times; uint64_t *host_times; uint32_t *host_array; uint32_t *device_array; uint8_t *host_outputs; uint8_t *device_outputs; hipStream_t stream; // This will be nonzero if we're using zero-copy memory. uint8_t zero_copy; // This will be nonzero if we're using managed memory. uint8_t managed_memory; } WalkState; // Converts a 64-bit count of nanoseconds to a floating-point number of // seconds. static double ConvertToSeconds(uint64_t nanoseconds) { return ((double) nanoseconds) / 1e9; } // Returns the value of CUDA's global nanosecond timer. static __device__ __inline__ uint64_t GlobalTimer64(void) { uint64_t to_return; asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(to_return)); return to_return; } __global__ void DoInOrderWalk(uint32_t *array, uint8_t *outputs, uint64_t *times) { int i, j, current, thread_number; uint32_t sum = 0; if (threadIdx.x == 0) { times[blockIdx.x * 2] = GlobalTimer64(); } __syncthreads(); thread_number = (blockIdx.x * THREAD_COUNT) + threadIdx.x; //current = thread_number; for (i = 0; i < STEPS_MAJOR; i++) { for (j = 0; j < STEPS_MINOR; j++) { current = j; sum += array[current]; } } outputs[thread_number] = (uint8_t) sum; __syncthreads(); if (threadIdx.x == 0) { times[(blockIdx.x * 2) + 1] = GlobalTimer64(); } } // Returns a random 31-bit integer in the range [0, limit). static inline uint32_t Rand32(uint32_t limit) { return lrand48() % limit; } // Takes an array and randomly shuffles its contents. The length parameter is // the number of elements in the array. Won't work properly for arrays // containing over 2^32 elements. 
static void ShuffleArray(uint32_t *array, size_t length) { size_t i, j; uint32_t tmp; if (length <= 1) return; for (i = 0; i < length; i++) { j = i + Rand32(length - i); tmp = array[j]; array[j] = array[i]; array[i] = tmp; } } // Selects and initializes the device to run the benchmarks on. void Initialize(int sync_level) { hipError_t error = hipErrorInvalidValue; switch (sync_level) { case 0: error = hipSetDeviceFlags(hipDeviceScheduleSpin); break; case 1: error = hipSetDeviceFlags(hipDeviceScheduleYield); break; case 2: error = hipSetDeviceFlags(hipDeviceScheduleBlockingSync); break; case 3: error = hipSetDeviceFlags(hipDeviceMapHost); break; default: printf("Invalid sync level: %d\n", sync_level); exit(1); } CheckError(error); CheckError(hipSetDevice(0)); } // Allocates and initializes all values (including the stream), in the state // struct. Requires the name of a file containing the pre-generated random walk // array. void AllocateMemory(WalkState *state) { uint32_t i = 0; size_t array_size = ARRAY_LENGTH * sizeof(uint32_t); size_t output_array_size = THREAD_COUNT * BLOCK_COUNT * sizeof(uint8_t); size_t times_array_size = BLOCK_COUNT * 2 * sizeof(uint64_t); if (state->managed_memory) { CheckError(hipMallocManaged(&state->host_times, times_array_size)); CheckError(hipMallocManaged(&state->host_array, array_size)); CheckError(hipMallocManaged(&state->host_outputs, output_array_size)); } else { CheckError(hipHostMalloc(&state->host_times, times_array_size)); CheckError(hipHostMalloc(&state->host_array, array_size)); CheckError(hipHostMalloc(&state->host_outputs, output_array_size)); if (!state->zero_copy) { // Should we use zero-copy for the times array? For now, we will. CheckError(hipMalloc(&state->device_times, times_array_size)); CheckError(hipMalloc(&state->device_array, array_size)); CheckError(hipMalloc(&state->device_outputs, output_array_size)); } else { CheckError(hipHostGetDevicePointer(&state->device_times, state->host_times, 0)); CheckError(hipHostGetDevicePointer(&state->device_array, state->host_array, 0)); CheckError(hipHostGetDevicePointer(&state->device_outputs, state->host_outputs, 0)); } } CheckError(hipStreamCreate(&state->stream)); //printf("Generating inorder walk array... "); //fflush(stdout); for (i = 0; i < ARRAY_LENGTH; i++) { state->host_array[i] = i; } ShuffleArray(state->host_array, ARRAY_LENGTH); //printf("done!\n"); } // Copies input arrays to the device. For this particular benchmark, this only // needs to be called once, since the input array is only read, and the output // array is always completely overwritten. void CopyIn(WalkState *state) { if (state->zero_copy || state->managed_memory) return; size_t array_size = ARRAY_LENGTH * sizeof(uint32_t); CheckError(hipMemcpyAsync(state->device_array, state->host_array, array_size, hipMemcpyHostToDevice, state->stream)); CheckError(hipStreamSynchronize(state->stream)); } // Copies the output array from the device. Should be called after every // iteration, so that times can be recorded. 
void CopyOut(WalkState *state) { if (state->zero_copy || state->managed_memory) return; size_t output_array_size = THREAD_COUNT * BLOCK_COUNT * sizeof(uint8_t); size_t times_array_size = BLOCK_COUNT * 2 * sizeof(uint64_t); CheckError(hipMemcpyAsync(state->host_outputs, state->device_outputs, output_array_size, hipMemcpyDeviceToHost, state->stream)); CheckError(hipMemcpyAsync(state->host_times, state->device_times, times_array_size, hipMemcpyDeviceToHost, state->stream)); CheckError(hipStreamSynchronize(state->stream)); } // Frees memory and closes the device stream. This will also reset the // zero_copy field to 0. void FreeMemory(WalkState *state) { CheckError(hipStreamSynchronize(state->stream)); CheckError(hipStreamDestroy(state->stream)); if (!state->zero_copy && !state->managed_memory) { CheckError(hipFree(state->device_array)); CheckError(hipFree(state->device_outputs)); CheckError(hipFree(state->device_times)); } if (state->managed_memory) { CheckError(hipFree(state->host_array)); CheckError(hipFree(state->host_outputs)); CheckError(hipFree(state->host_times)); } else { CheckError(hipHostFree(state->host_array)); CheckError(hipHostFree(state->host_outputs)); CheckError(hipHostFree(state->host_times)); } memset(state, 0, sizeof(*state)); } // Checks command-line arguments and sets members of the state struct if any // are affected. May exit the program if any arguments are invalid. static void ParseArgs(int argc, char **argv, WalkState *state) { int i; state->zero_copy = 0; if (argc == 1) return; if (argc != 2) { printf("Usage: %s [-zc|-mm]\n" " Specify -zc to use zero-copy memory.\n Specify -mm to use managed memory", argv[0]); exit(1); } for (i = 1; i < argc; i++) { if (strncmp(argv[i], "-zc", 3) == 0) { state->zero_copy = 1; continue; } if (strncmp(argv[i], "-mm", 3) == 0) { state->managed_memory = 1; continue; } printf("Unknown argument: %s\n", argv[i]); exit(1); } } int main(int argc, char **argv) { int i, j; double block_start, block_end; WalkState state; srand48(RANDOM_SEED); ParseArgs(argc, argv, &state); // Initialize and allocate memory, then lock pages. Initialize(3); AllocateMemory(&state); if (!mlockall(MCL_CURRENT | MCL_FUTURE)) { printf("Error: failed locking pages in memory\n"); return 1; } dim3 threads_per_block(THREAD_COUNT); dim3 block_count(BLOCK_COUNT); // We only need to copy in one time; the input array doesn't change. CopyIn(&state); for (i = 0; i < KERNEL_INVOCATIONS; i++) { if (state.managed_memory) { hipLaunchKernelGGL(( DoInOrderWalk), dim3(block_count), dim3(threads_per_block), 0, state.stream, state.host_array, state.host_outputs, state.host_times); CheckError(hipDeviceSynchronize()); } else { hipLaunchKernelGGL(( DoInOrderWalk), dim3(block_count), dim3(threads_per_block), 0, state.stream, state.device_array, state.device_outputs, state.device_times); } CheckError(hipStreamSynchronize(state.stream)); CopyOut(&state); for (j = 0; j < BLOCK_COUNT; j++) { block_start = ConvertToSeconds(state.host_times[j * 2]); block_end = ConvertToSeconds(state.host_times[(j * 2) + 1]); printf("Block %d: start: %f end: %f elapsed: %f\n", j, block_start, block_end, block_end - block_start); } } FreeMemory(&state); CheckError(hipDeviceReset()); return 0; }
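The -zc path of this benchmark uses zero-copy (mapped, page-locked) host memory: Initialize(3) sets the map-host device flag before the context is created, the buffers come from pinned allocations, and cudaHostGetDevicePointer / hipHostGetDevicePointer hands the kernel a device-visible alias, so every access travels over the host-device interconnect instead of device DRAM. A minimal sketch of the same pattern using the explicit cudaHostAllocMapped flag — illustrative names, error checking omitted; the benchmark itself reaches the same state via cudaMallocHost plus the device flag:

#include <cuda_runtime.h>

// Minimal zero-copy sketch (illustrative, not the benchmark's code).
void zero_copy_sketch(size_t count)
{
    // Must run before the CUDA context is created, i.e. before the first
    // runtime call that touches the device -- exactly what Initialize(3) does.
    cudaSetDeviceFlags(cudaDeviceMapHost);

    int *host_ptr = nullptr, *dev_alias = nullptr;
    cudaHostAlloc(&host_ptr, count * sizeof(int), cudaHostAllocMapped);  // pinned + mapped
    cudaHostGetDevicePointer(&dev_alias, host_ptr, 0);                   // device-visible alias

    // dev_alias can now be passed to a kernel; loads and stores are serviced
    // from host memory rather than from a device-resident copy.

    cudaFreeHost(host_ptr);
}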
6f485a0be01914ffe52a859eb42ddb5681ca17a0.cu
// This file defines a CUDA inorder walk microbenchmark, which traverses an // array in order. This will print the times for each *block* in each // kernel invocation. Specify the -zc command-line argument to use zero-copy // memory, and -mm for managed memory. // // Usage: ./inorder_walk [-zc|-mm] #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> // The seed to use for shuffling the array. #define RANDOM_SEED (1337) // The length, in number of 32-bit integers, of the array to walk #define ARRAY_LENGTH (1024 * 1024 * 64) // The number of steps in the random walk will equal these two values // multiplied together. #define STEPS_MAJOR (1) #define STEPS_MINOR (ARRAY_LENGTH / 262) // The total number of kernel invocations (measurement iterations) to perform. #define KERNEL_INVOCATIONS (10000) // These microbenchmarks will use 2 blocks of 32 threads. #define THREAD_COUNT (32) #define BLOCK_COUNT (2) // A macro which exits the program with an error message if the given value is // not equal to cudaSuccess. #define CheckError(val) CheckCUDAError( (val), #val, __FILE__, __LINE__ ) static void CheckCUDAError(cudaError_t value, const char *function, const char *filename, int line) { if (value == cudaSuccess) return; printf("Cuda error %d. File %s, line %d: %s\n", (int) value, filename, line, function); exit(1); } // Holds variables and pointers that are passed between the phases of the // experiment. The times arrays hold the start and end time stamps for each // block, in the order [block1_start, block1_end, block2_start, ...]. typedef struct { uint64_t *device_times; uint64_t *host_times; uint32_t *host_array; uint32_t *device_array; uint8_t *host_outputs; uint8_t *device_outputs; cudaStream_t stream; // This will be nonzero if we're using zero-copy memory. uint8_t zero_copy; // This will be nonzero if we're using managed memory. uint8_t managed_memory; } WalkState; // Converts a 64-bit count of nanoseconds to a floating-point number of // seconds. static double ConvertToSeconds(uint64_t nanoseconds) { return ((double) nanoseconds) / 1e9; } // Returns the value of CUDA's global nanosecond timer. static __device__ __inline__ uint64_t GlobalTimer64(void) { uint64_t to_return; asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(to_return)); return to_return; } __global__ void DoInOrderWalk(uint32_t *array, uint8_t *outputs, uint64_t *times) { int i, j, current, thread_number; uint32_t sum = 0; if (threadIdx.x == 0) { times[blockIdx.x * 2] = GlobalTimer64(); } __syncthreads(); thread_number = (blockIdx.x * THREAD_COUNT) + threadIdx.x; //current = thread_number; for (i = 0; i < STEPS_MAJOR; i++) { for (j = 0; j < STEPS_MINOR; j++) { current = j; sum += array[current]; } } outputs[thread_number] = (uint8_t) sum; __syncthreads(); if (threadIdx.x == 0) { times[(blockIdx.x * 2) + 1] = GlobalTimer64(); } } // Returns a random 31-bit integer in the range [0, limit). static inline uint32_t Rand32(uint32_t limit) { return lrand48() % limit; } // Takes an array and randomly shuffles its contents. The length parameter is // the number of elements in the array. Won't work properly for arrays // containing over 2^32 elements. static void ShuffleArray(uint32_t *array, size_t length) { size_t i, j; uint32_t tmp; if (length <= 1) return; for (i = 0; i < length; i++) { j = i + Rand32(length - i); tmp = array[j]; array[j] = array[i]; array[i] = tmp; } } // Selects and initializes the device to run the benchmarks on. 
void Initialize(int sync_level) { cudaError_t error = cudaErrorInvalidValue; switch (sync_level) { case 0: error = cudaSetDeviceFlags(cudaDeviceScheduleSpin); break; case 1: error = cudaSetDeviceFlags(cudaDeviceScheduleYield); break; case 2: error = cudaSetDeviceFlags(cudaDeviceBlockingSync); break; case 3: error = cudaSetDeviceFlags(cudaDeviceMapHost); break; default: printf("Invalid sync level: %d\n", sync_level); exit(1); } CheckError(error); CheckError(cudaSetDevice(0)); } // Allocates and initializes all values (including the stream), in the state // struct. Requires the name of a file containing the pre-generated random walk // array. void AllocateMemory(WalkState *state) { uint32_t i = 0; size_t array_size = ARRAY_LENGTH * sizeof(uint32_t); size_t output_array_size = THREAD_COUNT * BLOCK_COUNT * sizeof(uint8_t); size_t times_array_size = BLOCK_COUNT * 2 * sizeof(uint64_t); if (state->managed_memory) { CheckError(cudaMallocManaged(&state->host_times, times_array_size)); CheckError(cudaMallocManaged(&state->host_array, array_size)); CheckError(cudaMallocManaged(&state->host_outputs, output_array_size)); } else { CheckError(cudaMallocHost(&state->host_times, times_array_size)); CheckError(cudaMallocHost(&state->host_array, array_size)); CheckError(cudaMallocHost(&state->host_outputs, output_array_size)); if (!state->zero_copy) { // Should we use zero-copy for the times array? For now, we will. CheckError(cudaMalloc(&state->device_times, times_array_size)); CheckError(cudaMalloc(&state->device_array, array_size)); CheckError(cudaMalloc(&state->device_outputs, output_array_size)); } else { CheckError(cudaHostGetDevicePointer(&state->device_times, state->host_times, 0)); CheckError(cudaHostGetDevicePointer(&state->device_array, state->host_array, 0)); CheckError(cudaHostGetDevicePointer(&state->device_outputs, state->host_outputs, 0)); } } CheckError(cudaStreamCreate(&state->stream)); //printf("Generating inorder walk array... "); //fflush(stdout); for (i = 0; i < ARRAY_LENGTH; i++) { state->host_array[i] = i; } ShuffleArray(state->host_array, ARRAY_LENGTH); //printf("done!\n"); } // Copies input arrays to the device. For this particular benchmark, this only // needs to be called once, since the input array is only read, and the output // array is always completely overwritten. void CopyIn(WalkState *state) { if (state->zero_copy || state->managed_memory) return; size_t array_size = ARRAY_LENGTH * sizeof(uint32_t); CheckError(cudaMemcpyAsync(state->device_array, state->host_array, array_size, cudaMemcpyHostToDevice, state->stream)); CheckError(cudaStreamSynchronize(state->stream)); } // Copies the output array from the device. Should be called after every // iteration, so that times can be recorded. void CopyOut(WalkState *state) { if (state->zero_copy || state->managed_memory) return; size_t output_array_size = THREAD_COUNT * BLOCK_COUNT * sizeof(uint8_t); size_t times_array_size = BLOCK_COUNT * 2 * sizeof(uint64_t); CheckError(cudaMemcpyAsync(state->host_outputs, state->device_outputs, output_array_size, cudaMemcpyDeviceToHost, state->stream)); CheckError(cudaMemcpyAsync(state->host_times, state->device_times, times_array_size, cudaMemcpyDeviceToHost, state->stream)); CheckError(cudaStreamSynchronize(state->stream)); } // Frees memory and closes the device stream. This will also reset the // zero_copy field to 0. 
void FreeMemory(WalkState *state) { CheckError(cudaStreamSynchronize(state->stream)); CheckError(cudaStreamDestroy(state->stream)); if (!state->zero_copy && !state->managed_memory) { CheckError(cudaFree(state->device_array)); CheckError(cudaFree(state->device_outputs)); CheckError(cudaFree(state->device_times)); } if (state->managed_memory) { CheckError(cudaFree(state->host_array)); CheckError(cudaFree(state->host_outputs)); CheckError(cudaFree(state->host_times)); } else { CheckError(cudaFreeHost(state->host_array)); CheckError(cudaFreeHost(state->host_outputs)); CheckError(cudaFreeHost(state->host_times)); } memset(state, 0, sizeof(*state)); } // Checks command-line arguments and sets members of the state struct if any // are affected. May exit the program if any arguments are invalid. static void ParseArgs(int argc, char **argv, WalkState *state) { int i; state->zero_copy = 0; if (argc == 1) return; if (argc != 2) { printf("Usage: %s [-zc|-mm]\n" " Specify -zc to use zero-copy memory.\n Specify -mm to use managed memory", argv[0]); exit(1); } for (i = 1; i < argc; i++) { if (strncmp(argv[i], "-zc", 3) == 0) { state->zero_copy = 1; continue; } if (strncmp(argv[i], "-mm", 3) == 0) { state->managed_memory = 1; continue; } printf("Unknown argument: %s\n", argv[i]); exit(1); } } int main(int argc, char **argv) { int i, j; double block_start, block_end; WalkState state; srand48(RANDOM_SEED); ParseArgs(argc, argv, &state); // Initialize and allocate memory, then lock pages. Initialize(3); AllocateMemory(&state); if (!mlockall(MCL_CURRENT | MCL_FUTURE)) { printf("Error: failed locking pages in memory\n"); return 1; } dim3 threads_per_block(THREAD_COUNT); dim3 block_count(BLOCK_COUNT); // We only need to copy in one time; the input array doesn't change. CopyIn(&state); for (i = 0; i < KERNEL_INVOCATIONS; i++) { if (state.managed_memory) { DoInOrderWalk<<<block_count, threads_per_block, 0, state.stream>>>( state.host_array, state.host_outputs, state.host_times); CheckError(cudaDeviceSynchronize()); } else { DoInOrderWalk<<<block_count, threads_per_block, 0, state.stream>>>( state.device_array, state.device_outputs, state.device_times); } CheckError(cudaStreamSynchronize(state.stream)); CopyOut(&state); for (j = 0; j < BLOCK_COUNT; j++) { block_start = ConvertToSeconds(state.host_times[j * 2]); block_end = ConvertToSeconds(state.host_times[(j * 2) + 1]); printf("Block %d: start: %f end: %f elapsed: %f\n", j, block_start, block_end, block_end - block_start); } } FreeMemory(&state); CheckError(cudaDeviceReset()); return 0; }
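One host-side detail shared by both copies of this benchmark: mlockall returns 0 on success and -1 on failure, so the check if (!mlockall(MCL_CURRENT | MCL_FUTURE)) in main reports an error precisely when the pages were locked successfully, and continues silently when locking failed. A minimal sketch of the conventional check (plain POSIX, nothing CUDA-specific):

#include <cstdio>
#include <sys/mman.h>

// Conventional page-locking check: failure is the nonzero return.
static bool lock_all_pages(void)
{
    if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
        perror("mlockall");
        return false;
    }
    return true;
}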
dd10dcf6c728d48047b88233667da2166809acc7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> //#define DEVICE_ALLOC //#define UVM_ALLOC #define BLOCK_PER_SM 8 #define SM_NUM 56 #define BLOCK_NUM (SM_NUM * BLOCK_PER_SM) #define THREAD_PER_BLOCK 256 #define TOTAL_NUM (BLOCK_NUM * THREAD_PER_BLOCK) #define SIZE (1024 * 1024 * 9 * 7 * 5 * 12L) //#define STEP (512) //#define DEV_SIZE SIZE #define STEP (1) __global__ void kernel(int *input) { int tmp; __shared__ int s_tmp; s_tmp = 0; unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long begin = DEV_SIZE / TOTAL_NUM * idx; unsigned long long end = DEV_SIZE / TOTAL_NUM * (idx + 1); for (unsigned long long i = begin; i < end; i += STEP) { tmp = input[i]; s_tmp += tmp; } } class timer { hipEvent_t start; hipEvent_t end; public: timer() { hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start,0); } ~timer() { hipEventDestroy(start); hipEventDestroy(end); } float milliseconds_elapsed() { float elapsed_time; hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&elapsed_time, start, end); return elapsed_time; } float seconds_elapsed() { return milliseconds_elapsed() / 1000.0; } }; int main() { timer time_overall; int *d_input; #if defined(DEVICE_ALLOC) hipMalloc(&d_input, SIZE*sizeof(int)); #elif defined(UVM_ALLOC) hipMallocManaged(&d_input, SIZE*sizeof(int)); #else return 0; #endif // init #if defined(DEVICE_ALLOC) int *h_input; h_input = (int*)malloc(SIZE*sizeof(int)); for (unsigned long long i = 0; i < SIZE; i++) { h_input[i] = i; } timer time_in; hipMemcpy(d_input, h_input, SIZE*sizeof(int), hipMemcpyHostToDevice); #elif defined(UVM_ALLOC) for (unsigned long long i = 0; i < SIZE; i++) { d_input[i] = i; } timer time_in; #endif hipLaunchKernelGGL(( kernel), dim3(BLOCK_NUM), dim3(THREAD_PER_BLOCK), 0, 0, d_input); hipDeviceSynchronize(); double in_time = time_in.seconds_elapsed(); timer time_out; unsigned long long sum = 0; #if defined(DEVICE_ALLOC) hipMemcpy(h_input, d_input, SIZE*sizeof(int), hipMemcpyDeviceToHost); for (unsigned long long i = 0; i < SIZE; i++) { sum += h_input[i]; } #elif defined(UVM_ALLOC) for (unsigned long long i = 0; i < SIZE; i++) { sum += d_input[i]; } #endif double out_time = time_out.seconds_elapsed(); hipFree(d_input); #if defined(DEVICE_ALLOC) free(h_input); #endif double total_time = time_overall.seconds_elapsed(); printf("Time: %f sec\n", total_time); printf("In time: %f sec\n", in_time); printf("Out time: %f sec\n", out_time); printf("Sum: %llu\n", sum); return 0; }
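Note that the kernel above partitions the array with DEV_SIZE, but the #define DEV_SIZE SIZE line is commented out in both copies of this file, so the macro is presumably supplied at build time (for example through a -D compiler flag), much like the alternative STEP values that are toggled the same way. A minimal fallback guard one could add if it is not passed externally — an assumption about the intended build, not part of the original:

// Hypothetical fallback, assuming DEV_SIZE is normally passed as -DDEV_SIZE=...:
// default to walking the full allocation when it is not defined.
#ifndef DEV_SIZE
#define DEV_SIZE SIZE
#endif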
dd10dcf6c728d48047b88233667da2166809acc7.cu
#include <stdio.h> #include <stdlib.h> //#define DEVICE_ALLOC //#define UVM_ALLOC #define BLOCK_PER_SM 8 #define SM_NUM 56 #define BLOCK_NUM (SM_NUM * BLOCK_PER_SM) #define THREAD_PER_BLOCK 256 #define TOTAL_NUM (BLOCK_NUM * THREAD_PER_BLOCK) #define SIZE (1024 * 1024 * 9 * 7 * 5 * 12L) //#define STEP (512) //#define DEV_SIZE SIZE #define STEP (1) __global__ void kernel(int *input) { int tmp; __shared__ int s_tmp; s_tmp = 0; unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long begin = DEV_SIZE / TOTAL_NUM * idx; unsigned long long end = DEV_SIZE / TOTAL_NUM * (idx + 1); for (unsigned long long i = begin; i < end; i += STEP) { tmp = input[i]; s_tmp += tmp; } } class timer { cudaEvent_t start; cudaEvent_t end; public: timer() { cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start,0); } ~timer() { cudaEventDestroy(start); cudaEventDestroy(end); } float milliseconds_elapsed() { float elapsed_time; cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed_time, start, end); return elapsed_time; } float seconds_elapsed() { return milliseconds_elapsed() / 1000.0; } }; int main() { timer time_overall; int *d_input; #if defined(DEVICE_ALLOC) cudaMalloc(&d_input, SIZE*sizeof(int)); #elif defined(UVM_ALLOC) cudaMallocManaged(&d_input, SIZE*sizeof(int)); #else return 0; #endif // init #if defined(DEVICE_ALLOC) int *h_input; h_input = (int*)malloc(SIZE*sizeof(int)); for (unsigned long long i = 0; i < SIZE; i++) { h_input[i] = i; } timer time_in; cudaMemcpy(d_input, h_input, SIZE*sizeof(int), cudaMemcpyHostToDevice); #elif defined(UVM_ALLOC) for (unsigned long long i = 0; i < SIZE; i++) { d_input[i] = i; } timer time_in; #endif kernel<<<BLOCK_NUM, THREAD_PER_BLOCK>>>(d_input); cudaDeviceSynchronize(); double in_time = time_in.seconds_elapsed(); timer time_out; unsigned long long sum = 0; #if defined(DEVICE_ALLOC) cudaMemcpy(h_input, d_input, SIZE*sizeof(int), cudaMemcpyDeviceToHost); for (unsigned long long i = 0; i < SIZE; i++) { sum += h_input[i]; } #elif defined(UVM_ALLOC) for (unsigned long long i = 0; i < SIZE; i++) { sum += d_input[i]; } #endif double out_time = time_out.seconds_elapsed(); cudaFree(d_input); #if defined(DEVICE_ALLOC) free(h_input); #endif double total_time = time_overall.seconds_elapsed(); printf("Time: %f sec\n", total_time); printf("In time: %f sec\n", in_time); printf("Out time: %f sec\n", out_time); printf("Sum: %llu\n", sum); return 0; }
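The per-thread range in kernel is a plain block partition: each of the TOTAL_NUM threads walks DEV_SIZE / TOTAL_NUM consecutive ints. With the constants above, SIZE = 1024*1024*9*7*5*12 = 3,963,617,280 ints (roughly 14.8 GiB of int data, which is what makes the managed-memory path interesting) and TOTAL_NUM = 56*8*256 = 114,688 threads, so each thread covers exactly 34,560 elements when DEV_SIZE equals SIZE. A small compile-time sketch of that divisibility assumption; the macros are repeated only so the check is self-contained:

// Self-contained sanity sketch for the partition arithmetic used above.
#define BLOCK_PER_SM 8
#define SM_NUM 56
#define BLOCK_NUM (SM_NUM * BLOCK_PER_SM)
#define THREAD_PER_BLOCK 256
#define TOTAL_NUM (BLOCK_NUM * THREAD_PER_BLOCK)
#define SIZE (1024 * 1024 * 9 * 7 * 5 * 12L)

static_assert(SIZE % TOTAL_NUM == 0,
              "every thread must get a whole number of elements");
static_assert(SIZE / TOTAL_NUM == 34560L,
              "with these constants each thread walks 34,560 elements");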
26f20064edf857b6d7304476102f6a52f0d7565f.hip
// !!! This is a file automatically generated by hipify!!! /** * \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_id.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
26f20064edf857b6d7304476102f6a52f0d7565f.cu
/** * \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_id.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
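This last pair is not a kernel source but a generated explicit-instantiation unit: the templated implementation lives in the included .cuinl file, and gen_cuda_conv_bias_kern_impls.py emits one small .cu (and matching .hip) file per bias/epilogue combination, so each translation unit stays small and the many instantiations can compile in parallel. A minimal generic sketch of the same pattern — the names and the epilogue are hypothetical, not MegEngine's actual layout:

#include <cstdint>

// "kernels.cuinl": shared template definition, textually included by many tiny .cu files.
template <typename Epilogue>
__global__ void apply_epilogue_kernel(const int8_t* src, int8_t* dst, int n, Epilogue op)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = op(src[i]);
}

// Host-side launcher template; this is the symbol each generated file instantiates.
template <typename Epilogue>
void run_apply_epilogue(const int8_t* src, int8_t* dst, int n, Epilogue op, cudaStream_t stream)
{
    int block = 256, grid = (n + block - 1) / block;   // assumes n > 0
    apply_epilogue_kernel<Epilogue><<<grid, block, 0, stream>>>(src, dst, n, op);
}

// One generated file per configuration would then contain just:
struct IdentityEpilogue {
    __device__ int8_t operator()(int8_t x) const { return x; }
};
template void run_apply_epilogue<IdentityEpilogue>(
    const int8_t*, int8_t*, int, IdentityEpilogue, cudaStream_t);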
1af01bb310c06d533d12a5df05b1eb371ee70567.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r1, b_r1; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) + (2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 
3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) + (2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) + (2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i])) + stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i]))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2])))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1])))); uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1; b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i]; b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2]; b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 = 
mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2]; b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2]; b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1]; b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2]; b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) + (2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) + (2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) + (2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i])) + stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i]))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2])))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * 
(u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1])))); uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1; } } } __global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r2, b_r2; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { a_mux1 = mu[k][j][i-1] * strx[i-1]; a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2]; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; a_mux2 += 3.0 * mu[k][j][i] * strx[i]; a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1]; a_mux3 += 3.0 * mu[k][j][i] * strx[i]; a_mux4 = mu[k][j][i+1] * strx[i+1]; a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2]; a_muy1 = mu[k][j-1][i] * stry[j-1]; a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2]; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; a_muy2 += 3.0 * mu[k][j][i] * stry[j]; a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1]; a_muy3 += 3.0 * mu[k][j][i] * stry[j]; a_muy4 = mu[k][j+1][i] * stry[j+1]; a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muz1 = mu[k-1][j][i] * strz[k-1]; a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2]; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; a_muz2 += 3.0 * mu[k][j][i] * strz[k]; a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; a_muz3 += 3.0 * mu[k][j][i] * strz[k]; a_muz4 = mu[k+1][j][i] * strz[k+1]; a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2]; double _v_18_ = a_mux1 * u_1[k][j][i-2]; _v_18_ -= a_mux1 * u_1[k][j][i]; double _v_22_ = strx[i] * _v_18_; double _v_19_ = a_mux2 * u_1[k][j][i-1]; 
_v_19_ -= a_mux2 * u_1[k][j][i]; _v_22_ += strx[i] * _v_19_; double _v_20_ = a_mux3 * u_1[k][j][i+1]; _v_20_ -= a_mux3 * u_1[k][j][i]; _v_22_ += strx[i] * _v_20_; double _v_21_ = a_mux4 * u_1[k][j][i+2]; _v_21_ -= a_mux4 * u_1[k][j][i]; _v_22_ += strx[i] * _v_21_; a_r2 = 1.0 / 6.0 * _v_22_; double _t_8_ = u_1[k][j-2][i]; _t_8_ -= u_1[k][j][i]; double _t_7_ = 2.0 * a_muy1; double _v_23_ = la[k][j-1][i] * stry[j-1]; _t_7_ += _v_23_; _t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j]; double _t_9_ = 3.0 * la[k][j][i] * stry[j]; _t_9_ += 3.0 * la[k][j-1][i] * stry[j-1]; _t_9_ += 2.0 * a_muy2; double _t_11_ = 3.0 * la[k][j][i] * stry[j]; _t_11_ += 2.0 * a_muy3; double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]); _t_13_ += 2.0 * a_muy4; _t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2]; double _v_24_ = _t_7_ * _t_8_; double _v_33_ = stry[j] * _v_24_; _t_9_ += la[k][j-2][i] * stry[j-2]; _t_9_ += la[k][j+1][i] * stry[j+1]; double _t_10_ = u_1[k][j-1][i]; _t_10_ -= u_1[k][j][i]; double _v_27_ = _t_9_ * _t_10_; _v_33_ += stry[j] * _v_27_; _t_11_ += _v_23_; _t_11_ += la[k][j+2][i] * stry[j+2]; _t_11_ += 3.0 * la[k][j+1][i] * stry[j+1]; double _t_12_ = u_1[k][j+1][i]; _t_12_ -= u_1[k][j][i]; double _v_30_ = _t_11_ * _t_12_; _v_33_ += stry[j] * _v_30_; _t_13_ += la[k][j+1][i] * stry[j+1]; _t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2]; double _t_14_ = u_1[k][j+2][i]; _t_14_ -= u_1[k][j][i]; double _v_32_ = _t_13_ * _t_14_; _v_33_ += stry[j] * _v_32_; a_r2 += 1.0 / 6.0 * _v_33_; double _v_34_ = a_muz1 * u_1[k-2][j][i]; _v_34_ -= a_muz1 * u_1[k][j][i]; double _v_38_ = strz[k] * _v_34_; double _v_35_ = a_muz2 * u_1[k-1][j][i]; _v_35_ -= a_muz2 * u_1[k][j][i]; _v_38_ += strz[k] * _v_35_; double _v_36_ = a_muz3 * u_1[k+1][j][i]; _v_36_ -= a_muz3 * u_1[k][j][i]; _v_38_ += strz[k] * _v_36_; double _v_37_ = a_muz4 * u_1[k+2][j][i]; _v_37_ -= a_muz4 * u_1[k][j][i]; _v_38_ += strz[k] * _v_37_; a_r2 += 1.0 / 6.0 * _v_38_; double _t_27_ = u_0[k][j-2][i-1]; _t_27_ -= u_0[k][j+2][i-1]; _t_27_ += 8.0 * -u_0[k][j-1][i-1]; _t_27_ += 8.0 * u_0[k][j+1][i-1]; double _t_38_ = -u_0[k][j-2][i-1]; _t_38_ += u_0[k][j-2][i+1]; double _t_40_ = 8.0 * -u_0[k][j-1][i-1]; _t_40_ += 8.0 * u_0[k][j-1][i+1]; _t_40_ += u_0[k][j-1][i-2]; _t_40_ -= u_0[k][j-1][i+2]; double _t_43_ = 8.0 * -u_0[k][j+1][i-1]; _t_43_ += 8.0 * u_0[k][j+1][i+1]; _t_43_ += u_0[k][j+1][i-2]; _t_43_ -= u_0[k][j+1][i+2]; double _t_46_ = -u_0[k][j+2][i-1]; _t_46_ += u_0[k][j+2][i+1]; double _t_30_ = u_0[k][j-2][i+1]; _t_30_ -= u_0[k][j+2][i+1]; _t_30_ += 8.0 * -u_0[k][j-1][i+1]; _t_30_ += 8.0 * u_0[k][j+1][i+1]; double _t_25_ = -u_0[k][j-1][i-2]; _t_25_ += u_0[k][j+1][i-2]; double _t_33_ = -u_0[k][j-1][i+2]; _t_33_ += u_0[k][j+1][i+2]; double _t_22_ = -(8.0 * mu[k][j][i-1] * _t_27_); _t_22_ += 8.0 * mu[k][j][i+1] * _t_30_; double _v_39_ = mu[k][j][i-2] * u_0[k][j-2][i-2]; _v_39_ += mu[k][j][i-2] * 8.0 * _t_25_; _v_39_ -= mu[k][j][i-2] * u_0[k][j+2][i-2]; double _v_42_ = la[k][j-2][i] * u_0[k][j-2][i-2]; _v_42_ -= la[k][j-2][i] * u_0[k][j-2][i+2]; _v_42_ += la[k][j-2][i] * 8.0 * _t_38_; double _v_43_ = la[k][j+2][i] * u_0[k][j+2][i-2]; _v_43_ -= la[k][j+2][i] * u_0[k][j+2][i+2]; _v_43_ += la[k][j+2][i] * 8.0 * _t_46_; double _v_45_ = la[k][j-2][i] * u_2[k-2][j-2][i]; double _v_46_ = la[k][j+2][i] * u_2[k-2][j+2][i]; double _v_48_ = mu[k-2][j][i] * u_2[k-2][j-2][i]; _v_48_ -= mu[k-2][j][i] * u_2[k-2][j+2][i]; double _v_40_ = mu[k][j][i+2] * u_0[k][j-2][i+2]; _v_40_ += mu[k][j][i+2] * 8.0 * _t_33_; _v_40_ -= mu[k][j][i+2] * u_0[k][j+2][i+2]; _t_22_ += _v_39_; _t_22_ -= _v_40_; 
double _t_21_ = 1.0 / 144.0 * strx[i] * stry[j]; double _t_20_ = _t_21_ * _t_22_; double _t_35_ = _v_42_; _t_35_ -= 8.0 * la[k][j-1][i] * _t_40_; _t_35_ += 8.0 * la[k][j+1][i] * _t_43_; _t_35_ -= _v_43_; double _t_34_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_20_ += _t_34_ * _t_35_; double _t_53_ = u_2[k-2][j-1][i]; _t_53_ += 8.0 * u_2[k+1][j-1][i]; double _t_64_ = -u_2[k-2][j-1][i]; _t_64_ += u_2[k-2][j+1][i]; double _t_69_ = 8.0 * -u_2[k+1][j-1][i]; _t_69_ += 8.0 * u_2[k+1][j+1][i]; _t_69_ += u_2[k+1][j-2][i]; _t_69_ -= u_2[k+1][j+2][i]; double _t_56_ = u_2[k-2][j+1][i]; _t_56_ += 8.0 * u_2[k+1][j+1][i]; double _t_51_ = u_2[k+1][j-2][i]; double _t_59_ = u_2[k+1][j+2][i]; _t_53_ -= u_2[k+2][j-1][i]; _t_53_ += 8.0 * -u_2[k-1][j-1][i]; double _t_48_ = -(8.0 * la[k][j-1][i] * _t_53_); _t_56_ -= u_2[k+2][j+1][i]; _t_56_ += 8.0 * -u_2[k-1][j+1][i]; _t_48_ += 8.0 * la[k][j+1][i] * _t_56_; _t_51_ += -u_2[k-1][j-2][i]; _v_45_ += la[k][j-2][i] * 8.0 * _t_51_; _v_45_ -= la[k][j-2][i] * u_2[k+2][j-2][i]; _t_48_ += _v_45_; _t_59_ += -u_2[k-1][j+2][i]; _v_46_ += la[k][j+2][i] * 8.0 * _t_59_; _v_46_ -= la[k][j+2][i] * u_2[k+2][j+2][i]; _t_48_ -= _v_46_; double _t_47_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_20_ += _t_47_ * _t_48_; _v_48_ += mu[k-2][j][i] * 8.0 * _t_64_; double _t_61_ = _v_48_; _t_61_ += 8.0 * mu[k+1][j][i] * _t_69_; double _t_66_ = u_2[k-1][j-2][i]; _t_66_ -= u_2[k-1][j+2][i]; _t_66_ += 8.0 * -u_2[k-1][j-1][i]; _t_66_ += 8.0 * u_2[k-1][j+1][i]; _t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_; double _t_72_ = -u_2[k+2][j-1][i]; _t_72_ += u_2[k+2][j+1][i]; double _v_49_ = mu[k+2][j][i] * 8.0 * _t_72_; _v_49_ += mu[k+2][j][i] * u_2[k+2][j-2][i]; _v_49_ -= mu[k+2][j][i] * u_2[k+2][j+2][i]; _t_61_ -= _v_49_; double _t_60_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_20_ += _t_60_ * _t_61_; a_r2 += _t_20_; double uacc_1kc0jc0ic0 = cof * a_r2; uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i]; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; b_mux2 += 3.0 * mu[k+1][j][i] * strx[i]; b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1]; b_mux3 += 3.0 * mu[k+1][j][i] * strx[i]; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; b_muy2 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1]; b_muy3 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; b_muz1 = mu[k][j][i] * strz[k]; b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1]; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz2 += 3.0 * mu[k][j][i] * strz[k]; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2]; b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = mu[k+2][j][i] * 
strz[k+2]; b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3]; double _v_87_ = b_muz1 * u_1[k-1][j][i]; _v_87_ -= b_muz1 * u_1[k+1][j][i]; double _v_88_ = -(b_muz2 * u_1[k+1][j][i]); _v_88_ += b_muz2 * u_1[k][j][i]; double _v_89_ = -(b_muz3 * u_1[k+1][j][i]); _v_89_ += b_muz3 * u_1[k+2][j][i]; double _v_90_ = -(b_muz4 * u_1[k+1][j][i]); _v_90_ += b_muz4 * u_1[k+3][j][i]; double _v_71_ = -(b_mux1 * u_1[k+1][j][i]); _v_71_ += b_mux1 * u_1[k+1][j][i-2]; double _v_72_ = -(b_mux2 * u_1[k+1][j][i]); _v_72_ += b_mux2 * u_1[k+1][j][i-1]; double _v_73_ = -(b_mux3 * u_1[k+1][j][i]); _v_73_ += b_mux3 * u_1[k+1][j][i+1]; double _v_74_ = -(b_mux4 * u_1[k+1][j][i]); _v_74_ += b_mux4 * u_1[k+1][j][i+2]; double _t_81_ = -(u_1[k+1][j][i]); _t_81_ += u_1[k+1][j-2][i]; double _t_83_ = -(u_1[k+1][j][i]); _t_83_ += u_1[k+1][j-1][i]; double _t_85_ = -(u_1[k+1][j][i]); _t_85_ += u_1[k+1][j+1][i]; double _t_87_ = -(u_1[k+1][j][i]); _t_87_ += u_1[k+1][j+2][i]; double _v_91_ = strz[k+1] * _v_87_; _v_91_ += strz[k+1] * _v_88_; _v_91_ += strz[k+1] * _v_89_; _v_91_ += strz[k+1] * _v_90_; b_r2 = 1.0 / 6.0 * _v_91_; double _v_75_ = strx[i] * _v_71_; _v_75_ += strx[i] * _v_72_; _v_75_ += strx[i] * _v_73_; _v_75_ += strx[i] * _v_74_; b_r2 += 1.0 / 6.0 * _v_75_; double _t_80_ = 2.0 * b_muy1; double _v_76_ = la[k+1][j-1][i] * stry[j-1]; _t_80_ += _v_76_; _t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j]; double _t_82_ = 3.0 * la[k+1][j][i] * stry[j]; _t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1]; _t_82_ += 2.0 * b_muy2; double _t_84_ = 3.0 * la[k+1][j][i] * stry[j]; _t_84_ += 2.0 * b_muy3; double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]); _t_86_ += 2.0 * b_muy4; _t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2]; double _v_77_ = _t_80_ * _t_81_; double _v_86_ = stry[j] * _v_77_; _t_82_ += la[k+1][j-2][i] * stry[j-2]; double _v_79_ = la[k+1][j+1][i] * stry[j+1]; _t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1]; _t_82_ += _v_79_; double _v_80_ = _t_82_ * _t_83_; _v_86_ += stry[j] * _v_80_; _t_84_ += _v_76_; _t_84_ += la[k+1][j+2][i] * stry[j+2]; _t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2]; double _v_83_ = _t_84_ * _t_85_; _v_86_ += stry[j] * _v_83_; _t_86_ += _v_79_; double _v_85_ = _t_86_ * _t_87_; _v_86_ += stry[j] * _v_85_; b_r2 += 1.0 / 6.0 * _v_86_; double _v_101_ = mu[k-1][j][i] * u_2[k-1][j-2][i]; _v_101_ -= mu[k-1][j][i] * u_2[k-1][j+2][i]; double _v_98_ = la[k+1][j-2][i] * u_2[k-1][j-2][i]; _v_98_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i]; double _v_99_ = la[k+1][j+2][i] * u_2[k-1][j+2][i]; _v_99_ -= la[k+1][j+2][i] * u_2[k+3][j+2][i]; double _v_95_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2]; _v_95_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2]; double _v_96_ = la[k+1][j+2][i] * u_0[k+1][j+2][i-2]; _v_96_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i+2]; double _v_102_ = mu[k+3][j][i] * u_2[k+3][j-2][i]; _v_102_ -= mu[k+3][j][i] * u_2[k+3][j+2][i]; double _v_92_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2]; _v_92_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2]; double _v_93_ = mu[k+1][j][i+2] * u_0[k+1][j-2][i+2]; _v_93_ -= mu[k+1][j][i+2] * u_0[k+1][j+2][i+2]; double _t_137_ = -u_2[k-1][j-1][i]; _t_137_ += u_2[k-1][j+1][i]; double _t_126_ = u_2[k-1][j-1][i]; _t_126_ -= u_2[k+3][j-1][i]; _t_126_ += 8.0 * -u_2[k][j-1][i]; _t_126_ += 8.0 * u_2[k+2][j-1][i]; double _t_129_ = u_2[k-1][j+1][i]; _t_129_ -= u_2[k+3][j+1][i]; _t_129_ += 8.0 * -u_2[k][j+1][i]; _t_129_ += 8.0 * u_2[k+2][j+1][i]; double _t_142_ = 8.0 * -u_2[k+2][j-1][i]; _t_142_ += 8.0 * u_2[k+2][j+1][i]; _t_142_ += u_2[k+2][j-2][i]; _t_142_ -= 
u_2[k+2][j+2][i]; double _t_139_ = 8.0 * -u_2[k][j-1][i]; _t_139_ += 8.0 * u_2[k][j+1][i]; _t_139_ += u_2[k][j-2][i]; _t_139_ -= u_2[k][j+2][i]; double _t_145_ = -u_2[k+3][j-1][i]; _t_145_ += u_2[k+3][j+1][i]; double _t_124_ = -u_2[k][j-2][i]; _t_124_ += u_2[k+2][j-2][i]; double _t_132_ = -u_2[k][j+2][i]; _t_132_ += u_2[k+2][j+2][i]; _v_101_ += mu[k-1][j][i] * 8.0 * _t_137_; double _t_134_ = _v_101_; _t_134_ += 8.0 * mu[k+2][j][i] * _t_142_; _t_134_ -= 8.0 * mu[k][j][i] * _t_139_; _v_102_ += mu[k+3][j][i] * 8.0 * _t_145_; _t_134_ -= _v_102_; double _t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1]; double _t_94_ = 1.0 / 144.0 * strx[i] * stry[j]; double _t_93_ = _t_133_ * _t_134_; _v_98_ += la[k+1][j-2][i] * 8.0 * _t_124_; double _t_121_ = _v_98_; _t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_; _t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_; _v_99_ += la[k+1][j+2][i] * 8.0 * _t_132_; _t_121_ -= _v_99_; double _t_120_ = _t_133_; _t_93_ += _t_120_ * _t_121_; double _t_100_ = u_0[k+1][j-2][i-1]; _t_100_ -= u_0[k+1][j+2][i-1]; _t_100_ += 8.0 * -u_0[k+1][j-1][i-1]; _t_100_ += 8.0 * u_0[k+1][j+1][i-1]; double _t_111_ = -u_0[k+1][j-2][i-1]; _t_111_ += u_0[k+1][j-2][i+1]; double _t_113_ = 8.0 * -u_0[k+1][j-1][i-1]; _t_113_ += 8.0 * u_0[k+1][j-1][i+1]; _t_113_ += u_0[k+1][j-1][i-2]; _t_113_ -= u_0[k+1][j-1][i+2]; double _t_116_ = 8.0 * -u_0[k+1][j+1][i-1]; _t_116_ += 8.0 * u_0[k+1][j+1][i+1]; _t_116_ += u_0[k+1][j+1][i-2]; _t_116_ -= u_0[k+1][j+1][i+2]; double _t_119_ = -u_0[k+1][j+2][i-1]; _t_119_ += u_0[k+1][j+2][i+1]; double _t_103_ = u_0[k+1][j-2][i+1]; _t_103_ -= u_0[k+1][j+2][i+1]; _t_103_ += 8.0 * -u_0[k+1][j-1][i+1]; _t_103_ += 8.0 * u_0[k+1][j+1][i+1]; double _t_98_ = -u_0[k+1][j-1][i-2]; _t_98_ += u_0[k+1][j+1][i-2]; double _t_106_ = -u_0[k+1][j-1][i+2]; _t_106_ += u_0[k+1][j+1][i+2]; double _t_95_ = -(8.0 * mu[k+1][j][i-1] * _t_100_); _t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_; _v_92_ += mu[k+1][j][i-2] * 8.0 * _t_98_; _t_95_ += _v_92_; _v_93_ += mu[k+1][j][i+2] * 8.0 * _t_106_; _t_95_ -= _v_93_; _t_93_ += _t_94_ * _t_95_; _v_95_ += la[k+1][j-2][i] * 8.0 * _t_111_; double _t_108_ = _v_95_; _t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_; _t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_; _v_96_ += la[k+1][j+2][i] * 8.0 * _t_119_; _t_108_ -= _v_96_; double _t_107_ = _t_94_; _t_93_ += _t_107_ * _t_108_; b_r2 += _t_93_; double _v_105_ = cof * b_r2; double uacc_1kp1jc0ic0 = _v_105_; uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i]; uacc_1[k][j][i] = uacc_1kc0jc0ic0; uacc_1[k+1][j][i] = uacc_1kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = 
(double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4; double r1, r2, r3; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 10 for (int k=2; k<=N-3; k++) { mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1]; mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) + stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) + strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) + (2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) + (2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) + (2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i]))); r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + 
u_1[k+1][j+2][i])))); r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i])))); uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3; } } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; hipMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_1; hipMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_2; hipMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_0; hipMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_1; hipMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_2; hipMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *mu; hipMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *la; hipMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *strx; hipMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice); double *stry; hipMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice); double *strz; hipMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); hipLaunchKernelGGL(( sw4_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); hipLaunchKernelGGL(( sw4_2) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, 
uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  hipLaunchKernelGGL(( sw4_3) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
  hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
  hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
  hipFree (uacc_0);
  hipFree (uacc_1);
  hipFree (uacc_2);
  hipFree (u_0);
  hipFree (u_1);
  hipFree (u_2);
  hipFree (mu);
  hipFree (la);
  hipFree (strx);
  hipFree (stry);
  hipFree (strz);
}
1af01bb310c06d533d12a5df05b1eb371ee70567.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r1, b_r1; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) + (2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) 
+ (2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) + (2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i])) + stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i]))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2])))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1])))); uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1; b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i]; b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2]; b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * 
mu[k+1][j+2][i] * stry[j+2]; b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2]; b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1]; b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2]; b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) + (2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) + (2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) + (2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i])) + stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i]))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2])))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - 
(mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1])))); uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1; } } } __global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r2, b_r2; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { a_mux1 = mu[k][j][i-1] * strx[i-1]; a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2]; a_mux2 = mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i+1] * strx[i+1]; a_mux2 += 3.0 * mu[k][j][i] * strx[i]; a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 = mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1]; a_mux3 += 3.0 * mu[k][j][i] * strx[i]; a_mux4 = mu[k][j][i+1] * strx[i+1]; a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2]; a_muy1 = mu[k][j-1][i] * stry[j-1]; a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2]; a_muy2 = mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j+1][i] * stry[j+1]; a_muy2 += 3.0 * mu[k][j][i] * stry[j]; a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 = mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j+2][i] * stry[j+2]; a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1]; a_muy3 += 3.0 * mu[k][j][i] * stry[j]; a_muy4 = mu[k][j+1][i] * stry[j+1]; a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2]; a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muz1 = mu[k-1][j][i] * strz[k-1]; a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2]; a_muz2 = mu[k-2][j][i] * strz[k-2]; a_muz2 += mu[k+1][j][i] * strz[k+1]; a_muz2 += 3.0 * mu[k][j][i] * strz[k]; a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; a_muz3 += 3.0 * mu[k][j][i] * strz[k]; a_muz4 = mu[k+1][j][i] * strz[k+1]; a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2]; double _v_18_ = a_mux1 * u_1[k][j][i-2]; _v_18_ -= a_mux1 * u_1[k][j][i]; double _v_22_ = strx[i] * _v_18_; double _v_19_ = a_mux2 * u_1[k][j][i-1]; _v_19_ -= a_mux2 * u_1[k][j][i]; _v_22_ += strx[i] * _v_19_; double _v_20_ = a_mux3 * 
u_1[k][j][i+1]; _v_20_ -= a_mux3 * u_1[k][j][i]; _v_22_ += strx[i] * _v_20_; double _v_21_ = a_mux4 * u_1[k][j][i+2]; _v_21_ -= a_mux4 * u_1[k][j][i]; _v_22_ += strx[i] * _v_21_; a_r2 = 1.0 / 6.0 * _v_22_; double _t_8_ = u_1[k][j-2][i]; _t_8_ -= u_1[k][j][i]; double _t_7_ = 2.0 * a_muy1; double _v_23_ = la[k][j-1][i] * stry[j-1]; _t_7_ += _v_23_; _t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j]; double _t_9_ = 3.0 * la[k][j][i] * stry[j]; _t_9_ += 3.0 * la[k][j-1][i] * stry[j-1]; _t_9_ += 2.0 * a_muy2; double _t_11_ = 3.0 * la[k][j][i] * stry[j]; _t_11_ += 2.0 * a_muy3; double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]); _t_13_ += 2.0 * a_muy4; _t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2]; double _v_24_ = _t_7_ * _t_8_; double _v_33_ = stry[j] * _v_24_; _t_9_ += la[k][j-2][i] * stry[j-2]; _t_9_ += la[k][j+1][i] * stry[j+1]; double _t_10_ = u_1[k][j-1][i]; _t_10_ -= u_1[k][j][i]; double _v_27_ = _t_9_ * _t_10_; _v_33_ += stry[j] * _v_27_; _t_11_ += _v_23_; _t_11_ += la[k][j+2][i] * stry[j+2]; _t_11_ += 3.0 * la[k][j+1][i] * stry[j+1]; double _t_12_ = u_1[k][j+1][i]; _t_12_ -= u_1[k][j][i]; double _v_30_ = _t_11_ * _t_12_; _v_33_ += stry[j] * _v_30_; _t_13_ += la[k][j+1][i] * stry[j+1]; _t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2]; double _t_14_ = u_1[k][j+2][i]; _t_14_ -= u_1[k][j][i]; double _v_32_ = _t_13_ * _t_14_; _v_33_ += stry[j] * _v_32_; a_r2 += 1.0 / 6.0 * _v_33_; double _v_34_ = a_muz1 * u_1[k-2][j][i]; _v_34_ -= a_muz1 * u_1[k][j][i]; double _v_38_ = strz[k] * _v_34_; double _v_35_ = a_muz2 * u_1[k-1][j][i]; _v_35_ -= a_muz2 * u_1[k][j][i]; _v_38_ += strz[k] * _v_35_; double _v_36_ = a_muz3 * u_1[k+1][j][i]; _v_36_ -= a_muz3 * u_1[k][j][i]; _v_38_ += strz[k] * _v_36_; double _v_37_ = a_muz4 * u_1[k+2][j][i]; _v_37_ -= a_muz4 * u_1[k][j][i]; _v_38_ += strz[k] * _v_37_; a_r2 += 1.0 / 6.0 * _v_38_; double _t_27_ = u_0[k][j-2][i-1]; _t_27_ -= u_0[k][j+2][i-1]; _t_27_ += 8.0 * -u_0[k][j-1][i-1]; _t_27_ += 8.0 * u_0[k][j+1][i-1]; double _t_38_ = -u_0[k][j-2][i-1]; _t_38_ += u_0[k][j-2][i+1]; double _t_40_ = 8.0 * -u_0[k][j-1][i-1]; _t_40_ += 8.0 * u_0[k][j-1][i+1]; _t_40_ += u_0[k][j-1][i-2]; _t_40_ -= u_0[k][j-1][i+2]; double _t_43_ = 8.0 * -u_0[k][j+1][i-1]; _t_43_ += 8.0 * u_0[k][j+1][i+1]; _t_43_ += u_0[k][j+1][i-2]; _t_43_ -= u_0[k][j+1][i+2]; double _t_46_ = -u_0[k][j+2][i-1]; _t_46_ += u_0[k][j+2][i+1]; double _t_30_ = u_0[k][j-2][i+1]; _t_30_ -= u_0[k][j+2][i+1]; _t_30_ += 8.0 * -u_0[k][j-1][i+1]; _t_30_ += 8.0 * u_0[k][j+1][i+1]; double _t_25_ = -u_0[k][j-1][i-2]; _t_25_ += u_0[k][j+1][i-2]; double _t_33_ = -u_0[k][j-1][i+2]; _t_33_ += u_0[k][j+1][i+2]; double _t_22_ = -(8.0 * mu[k][j][i-1] * _t_27_); _t_22_ += 8.0 * mu[k][j][i+1] * _t_30_; double _v_39_ = mu[k][j][i-2] * u_0[k][j-2][i-2]; _v_39_ += mu[k][j][i-2] * 8.0 * _t_25_; _v_39_ -= mu[k][j][i-2] * u_0[k][j+2][i-2]; double _v_42_ = la[k][j-2][i] * u_0[k][j-2][i-2]; _v_42_ -= la[k][j-2][i] * u_0[k][j-2][i+2]; _v_42_ += la[k][j-2][i] * 8.0 * _t_38_; double _v_43_ = la[k][j+2][i] * u_0[k][j+2][i-2]; _v_43_ -= la[k][j+2][i] * u_0[k][j+2][i+2]; _v_43_ += la[k][j+2][i] * 8.0 * _t_46_; double _v_45_ = la[k][j-2][i] * u_2[k-2][j-2][i]; double _v_46_ = la[k][j+2][i] * u_2[k-2][j+2][i]; double _v_48_ = mu[k-2][j][i] * u_2[k-2][j-2][i]; _v_48_ -= mu[k-2][j][i] * u_2[k-2][j+2][i]; double _v_40_ = mu[k][j][i+2] * u_0[k][j-2][i+2]; _v_40_ += mu[k][j][i+2] * 8.0 * _t_33_; _v_40_ -= mu[k][j][i+2] * u_0[k][j+2][i+2]; _t_22_ += _v_39_; _t_22_ -= _v_40_; double _t_21_ = 1.0 / 144.0 * strx[i] * stry[j]; double _t_20_ = _t_21_ * _t_22_; 
double _t_35_ = _v_42_; _t_35_ -= 8.0 * la[k][j-1][i] * _t_40_; _t_35_ += 8.0 * la[k][j+1][i] * _t_43_; _t_35_ -= _v_43_; double _t_34_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_20_ += _t_34_ * _t_35_; double _t_53_ = u_2[k-2][j-1][i]; _t_53_ += 8.0 * u_2[k+1][j-1][i]; double _t_64_ = -u_2[k-2][j-1][i]; _t_64_ += u_2[k-2][j+1][i]; double _t_69_ = 8.0 * -u_2[k+1][j-1][i]; _t_69_ += 8.0 * u_2[k+1][j+1][i]; _t_69_ += u_2[k+1][j-2][i]; _t_69_ -= u_2[k+1][j+2][i]; double _t_56_ = u_2[k-2][j+1][i]; _t_56_ += 8.0 * u_2[k+1][j+1][i]; double _t_51_ = u_2[k+1][j-2][i]; double _t_59_ = u_2[k+1][j+2][i]; _t_53_ -= u_2[k+2][j-1][i]; _t_53_ += 8.0 * -u_2[k-1][j-1][i]; double _t_48_ = -(8.0 * la[k][j-1][i] * _t_53_); _t_56_ -= u_2[k+2][j+1][i]; _t_56_ += 8.0 * -u_2[k-1][j+1][i]; _t_48_ += 8.0 * la[k][j+1][i] * _t_56_; _t_51_ += -u_2[k-1][j-2][i]; _v_45_ += la[k][j-2][i] * 8.0 * _t_51_; _v_45_ -= la[k][j-2][i] * u_2[k+2][j-2][i]; _t_48_ += _v_45_; _t_59_ += -u_2[k-1][j+2][i]; _v_46_ += la[k][j+2][i] * 8.0 * _t_59_; _v_46_ -= la[k][j+2][i] * u_2[k+2][j+2][i]; _t_48_ -= _v_46_; double _t_47_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_20_ += _t_47_ * _t_48_; _v_48_ += mu[k-2][j][i] * 8.0 * _t_64_; double _t_61_ = _v_48_; _t_61_ += 8.0 * mu[k+1][j][i] * _t_69_; double _t_66_ = u_2[k-1][j-2][i]; _t_66_ -= u_2[k-1][j+2][i]; _t_66_ += 8.0 * -u_2[k-1][j-1][i]; _t_66_ += 8.0 * u_2[k-1][j+1][i]; _t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_; double _t_72_ = -u_2[k+2][j-1][i]; _t_72_ += u_2[k+2][j+1][i]; double _v_49_ = mu[k+2][j][i] * 8.0 * _t_72_; _v_49_ += mu[k+2][j][i] * u_2[k+2][j-2][i]; _v_49_ -= mu[k+2][j][i] * u_2[k+2][j+2][i]; _t_61_ -= _v_49_; double _t_60_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_20_ += _t_60_ * _t_61_; a_r2 += _t_20_; double uacc_1kc0jc0ic0 = cof * a_r2; uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i]; b_mux1 = mu[k+1][j][i-1] * strx[i-1]; b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 = mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; b_mux2 += 3.0 * mu[k+1][j][i] * strx[i]; b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 = mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1]; b_mux3 += 3.0 * mu[k+1][j][i] * strx[i]; b_mux4 = mu[k+1][j][i+1] * strx[i+1]; b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2]; b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 = mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j+1][i] * stry[j+1]; b_muy2 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 = mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j+2][i] * stry[j+2]; b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1]; b_muy3 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 = mu[k+1][j+1][i] * stry[j+1]; b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2]; b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; b_muz1 = mu[k][j][i] * strz[k]; b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1]; b_muz2 = mu[k-1][j][i] * strz[k-1]; b_muz2 += mu[k+2][j][i] * strz[k+2]; b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz2 += 3.0 * mu[k][j][i] * strz[k]; b_muz3 = mu[k][j][i] * strz[k]; b_muz3 += mu[k+3][j][i] * strz[k+3]; b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2]; b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = mu[k+2][j][i] * strz[k+2]; b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 -= 3.0 / 4.0 * 
mu[k+3][j][i] * strz[k+3]; double _v_87_ = b_muz1 * u_1[k-1][j][i]; _v_87_ -= b_muz1 * u_1[k+1][j][i]; double _v_88_ = -(b_muz2 * u_1[k+1][j][i]); _v_88_ += b_muz2 * u_1[k][j][i]; double _v_89_ = -(b_muz3 * u_1[k+1][j][i]); _v_89_ += b_muz3 * u_1[k+2][j][i]; double _v_90_ = -(b_muz4 * u_1[k+1][j][i]); _v_90_ += b_muz4 * u_1[k+3][j][i]; double _v_71_ = -(b_mux1 * u_1[k+1][j][i]); _v_71_ += b_mux1 * u_1[k+1][j][i-2]; double _v_72_ = -(b_mux2 * u_1[k+1][j][i]); _v_72_ += b_mux2 * u_1[k+1][j][i-1]; double _v_73_ = -(b_mux3 * u_1[k+1][j][i]); _v_73_ += b_mux3 * u_1[k+1][j][i+1]; double _v_74_ = -(b_mux4 * u_1[k+1][j][i]); _v_74_ += b_mux4 * u_1[k+1][j][i+2]; double _t_81_ = -(u_1[k+1][j][i]); _t_81_ += u_1[k+1][j-2][i]; double _t_83_ = -(u_1[k+1][j][i]); _t_83_ += u_1[k+1][j-1][i]; double _t_85_ = -(u_1[k+1][j][i]); _t_85_ += u_1[k+1][j+1][i]; double _t_87_ = -(u_1[k+1][j][i]); _t_87_ += u_1[k+1][j+2][i]; double _v_91_ = strz[k+1] * _v_87_; _v_91_ += strz[k+1] * _v_88_; _v_91_ += strz[k+1] * _v_89_; _v_91_ += strz[k+1] * _v_90_; b_r2 = 1.0 / 6.0 * _v_91_; double _v_75_ = strx[i] * _v_71_; _v_75_ += strx[i] * _v_72_; _v_75_ += strx[i] * _v_73_; _v_75_ += strx[i] * _v_74_; b_r2 += 1.0 / 6.0 * _v_75_; double _t_80_ = 2.0 * b_muy1; double _v_76_ = la[k+1][j-1][i] * stry[j-1]; _t_80_ += _v_76_; _t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j]; double _t_82_ = 3.0 * la[k+1][j][i] * stry[j]; _t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1]; _t_82_ += 2.0 * b_muy2; double _t_84_ = 3.0 * la[k+1][j][i] * stry[j]; _t_84_ += 2.0 * b_muy3; double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]); _t_86_ += 2.0 * b_muy4; _t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2]; double _v_77_ = _t_80_ * _t_81_; double _v_86_ = stry[j] * _v_77_; _t_82_ += la[k+1][j-2][i] * stry[j-2]; double _v_79_ = la[k+1][j+1][i] * stry[j+1]; _t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1]; _t_82_ += _v_79_; double _v_80_ = _t_82_ * _t_83_; _v_86_ += stry[j] * _v_80_; _t_84_ += _v_76_; _t_84_ += la[k+1][j+2][i] * stry[j+2]; _t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2]; double _v_83_ = _t_84_ * _t_85_; _v_86_ += stry[j] * _v_83_; _t_86_ += _v_79_; double _v_85_ = _t_86_ * _t_87_; _v_86_ += stry[j] * _v_85_; b_r2 += 1.0 / 6.0 * _v_86_; double _v_101_ = mu[k-1][j][i] * u_2[k-1][j-2][i]; _v_101_ -= mu[k-1][j][i] * u_2[k-1][j+2][i]; double _v_98_ = la[k+1][j-2][i] * u_2[k-1][j-2][i]; _v_98_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i]; double _v_99_ = la[k+1][j+2][i] * u_2[k-1][j+2][i]; _v_99_ -= la[k+1][j+2][i] * u_2[k+3][j+2][i]; double _v_95_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2]; _v_95_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2]; double _v_96_ = la[k+1][j+2][i] * u_0[k+1][j+2][i-2]; _v_96_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i+2]; double _v_102_ = mu[k+3][j][i] * u_2[k+3][j-2][i]; _v_102_ -= mu[k+3][j][i] * u_2[k+3][j+2][i]; double _v_92_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2]; _v_92_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2]; double _v_93_ = mu[k+1][j][i+2] * u_0[k+1][j-2][i+2]; _v_93_ -= mu[k+1][j][i+2] * u_0[k+1][j+2][i+2]; double _t_137_ = -u_2[k-1][j-1][i]; _t_137_ += u_2[k-1][j+1][i]; double _t_126_ = u_2[k-1][j-1][i]; _t_126_ -= u_2[k+3][j-1][i]; _t_126_ += 8.0 * -u_2[k][j-1][i]; _t_126_ += 8.0 * u_2[k+2][j-1][i]; double _t_129_ = u_2[k-1][j+1][i]; _t_129_ -= u_2[k+3][j+1][i]; _t_129_ += 8.0 * -u_2[k][j+1][i]; _t_129_ += 8.0 * u_2[k+2][j+1][i]; double _t_142_ = 8.0 * -u_2[k+2][j-1][i]; _t_142_ += 8.0 * u_2[k+2][j+1][i]; _t_142_ += u_2[k+2][j-2][i]; _t_142_ -= u_2[k+2][j+2][i]; double _t_139_ = 8.0 * -u_2[k][j-1][i]; _t_139_ += 8.0 * 
u_2[k][j+1][i]; _t_139_ += u_2[k][j-2][i]; _t_139_ -= u_2[k][j+2][i]; double _t_145_ = -u_2[k+3][j-1][i]; _t_145_ += u_2[k+3][j+1][i]; double _t_124_ = -u_2[k][j-2][i]; _t_124_ += u_2[k+2][j-2][i]; double _t_132_ = -u_2[k][j+2][i]; _t_132_ += u_2[k+2][j+2][i]; _v_101_ += mu[k-1][j][i] * 8.0 * _t_137_; double _t_134_ = _v_101_; _t_134_ += 8.0 * mu[k+2][j][i] * _t_142_; _t_134_ -= 8.0 * mu[k][j][i] * _t_139_; _v_102_ += mu[k+3][j][i] * 8.0 * _t_145_; _t_134_ -= _v_102_; double _t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1]; double _t_94_ = 1.0 / 144.0 * strx[i] * stry[j]; double _t_93_ = _t_133_ * _t_134_; _v_98_ += la[k+1][j-2][i] * 8.0 * _t_124_; double _t_121_ = _v_98_; _t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_; _t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_; _v_99_ += la[k+1][j+2][i] * 8.0 * _t_132_; _t_121_ -= _v_99_; double _t_120_ = _t_133_; _t_93_ += _t_120_ * _t_121_; double _t_100_ = u_0[k+1][j-2][i-1]; _t_100_ -= u_0[k+1][j+2][i-1]; _t_100_ += 8.0 * -u_0[k+1][j-1][i-1]; _t_100_ += 8.0 * u_0[k+1][j+1][i-1]; double _t_111_ = -u_0[k+1][j-2][i-1]; _t_111_ += u_0[k+1][j-2][i+1]; double _t_113_ = 8.0 * -u_0[k+1][j-1][i-1]; _t_113_ += 8.0 * u_0[k+1][j-1][i+1]; _t_113_ += u_0[k+1][j-1][i-2]; _t_113_ -= u_0[k+1][j-1][i+2]; double _t_116_ = 8.0 * -u_0[k+1][j+1][i-1]; _t_116_ += 8.0 * u_0[k+1][j+1][i+1]; _t_116_ += u_0[k+1][j+1][i-2]; _t_116_ -= u_0[k+1][j+1][i+2]; double _t_119_ = -u_0[k+1][j+2][i-1]; _t_119_ += u_0[k+1][j+2][i+1]; double _t_103_ = u_0[k+1][j-2][i+1]; _t_103_ -= u_0[k+1][j+2][i+1]; _t_103_ += 8.0 * -u_0[k+1][j-1][i+1]; _t_103_ += 8.0 * u_0[k+1][j+1][i+1]; double _t_98_ = -u_0[k+1][j-1][i-2]; _t_98_ += u_0[k+1][j+1][i-2]; double _t_106_ = -u_0[k+1][j-1][i+2]; _t_106_ += u_0[k+1][j+1][i+2]; double _t_95_ = -(8.0 * mu[k+1][j][i-1] * _t_100_); _t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_; _v_92_ += mu[k+1][j][i-2] * 8.0 * _t_98_; _t_95_ += _v_92_; _v_93_ += mu[k+1][j][i+2] * 8.0 * _t_106_; _t_95_ -= _v_93_; _t_93_ += _t_94_ * _t_95_; _v_95_ += la[k+1][j-2][i] * 8.0 * _t_111_; double _t_108_ = _v_95_; _t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_; _t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_; _v_96_ += la[k+1][j+2][i] * 8.0 * _t_119_; _t_108_ -= _v_96_; double _t_107_ = _t_94_; _t_93_ += _t_107_ * _t_108_; b_r2 += _t_93_; double _v_105_ = cof * b_r2; double uacc_1kp1jc0ic0 = _v_105_; uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i]; uacc_1[k][j][i] = uacc_1kc0jc0ic0; uacc_1[k+1][j][i] = uacc_1kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double 
(*)[304][304])la_in; double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4; double r1, r2, r3; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 10 for (int k=2; k<=N-3; k++) { mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1]; mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) + stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) + strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) + (2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) + (2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) + (2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i]))); r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i])))); r3 += strx[i] * strz[k] * (1e0 / 144) * 
(la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i])))); uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3; } } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; cudaMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_1; cudaMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_2; cudaMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_0; cudaMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_1; cudaMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_2; cudaMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); double *strz; cudaMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); sw4_3 <<<gridconfig, blockconfig>>> 
(uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaFree (uacc_0); cudaFree (uacc_1); cudaFree (uacc_2); cudaFree (u_0); cudaFree (u_1); cudaFree (u_2); cudaFree (mu); cudaFree (la); cudaFree (strx); cudaFree (stry); cudaFree (strz); }
70b8832b639448e28932255ad5e220bdfb58153d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <fstream>
#include <time.h>

// Generic utils
typedef float3 pixel;

void check_result(hipError_t value) {
    hipError_t status = value;
    if (status != hipSuccess) {
        printf("Error %s at line %d in file %s\n", hipGetErrorString(status), __LINE__, __FILE__);
        // exit(1);
    }
}

__device__ float3 operator+(const float3 &a, const float3 &b) {
    return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}

__device__ float3 operator*(const float3 &a, const float &b) {
    return make_float3(a.x * b, a.y * b, a.z * b);
}

__device__ float length(const float3 &vec) {
    return sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
}

__device__ float3 normalize(const float3 vec) {
    float inverted_len = 1.0f / length(vec);
    return vec * inverted_len;
}

// Raymarcher
typedef struct {
    float3 o;
    float3 d;
} ray;

__device__ ray get_ray(const float& u, const float& v) {
    ray r;
    r.o = make_float3(-5.0, 0.0, 0.0);
    r.d = normalize(make_float3(1.0, u, v));
    return r;
}

__device__ float mandelbulb_de(float3 pos) {
    // pos = fmod(fabs(pos), 4.0) - 2.0;
    float3 z = pos;
    float dr = 1.0;
    float r = 0.0;
    int Iterations = 4;
    float Bailout = 4.0;
    float Power = 16.0;
    for(int i = 0; i < Iterations; i++) {
        r = length(z);
        if (r > Bailout) break;
        // convert to polar coordinates
        float theta = acos(z.z / r);
        float phi = atan2(z.y, z.x);
        dr = powf(r, Power - 1.0) * Power * dr + 1.0;
        // scale and rotate the point
        float zr = pow(r, Power);
        theta = theta * Power;
        phi = phi * Power;
        // convert back to cartesian coordinates
        z = make_float3(sin(theta) * cos(phi), sin(phi) * sin(theta), cos(theta)) * zr;
        z = z + pos;
        //z += pos * cos(time * 2.0);
    }
    return 0.5 * log(r) * r / dr;
}

__device__ float march(ray r) {
    float total_dist = 0.0;
    int max_ray_steps = 64;
    float min_distance = 0.002;
    int steps;
    for (steps = 0; steps < max_ray_steps; ++steps) {
        float3 p = r.o + r.d * total_dist;
        float distance = mandelbulb_de(p);
        total_dist += distance;
        if (distance < min_distance) break;
    }
    return 1.0 - (float) steps / (float) max_ray_steps;
}

// Main kernel
__global__ void d_main(
    pixel* screen_buffer,
    const size_t width,
    const size_t height
) {
    size_t x = (blockIdx.x * blockDim.x) + threadIdx.x;
    size_t y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if(x < width && y < height) {
        float min_w_h = (float) min(width, height);
        float ar = (float) width / (float) height;
        float u = (float) x / min_w_h - ar * 0.5f;
        float v = (float) y / min_w_h - 0.5f;
        ray r = get_ray(u, v);
        float c = march(r) * 255.0f;
        float3 color = make_float3(c, c, c);
        screen_buffer[y * width + x] = color;
    }
}

void write_image(
    char* file_name,
    pixel* screen_buff,
    size_t width,
    size_t height
) {
    FILE* image = fopen(file_name, "w");
    fprintf(image, "P3\n");
    fprintf(image, "%i %i\n", (int) width, (int) height);
    fprintf(image, "%i\n", 255);
    for (size_t y = 0; y < height; y++) {
        for (size_t x = 0; x < width; x++) {
            float3 pixel = screen_buff[y * width + x];
            fprintf(image, "%i %i %i\n", (int) pixel.x, (int) pixel.y, (int) pixel.z);
        }
    }
    fclose(image);
}

int main(int argc, char** argv) {
    // printf("Mandelbulb\n");
    if(argc < 7) {
        printf("Not enough params.\n");
        return 1;
    }
    char* file_name = argv[1];
    size_t width = atoi(argv[2]);
    size_t height = atoi(argv[3]);
    size_t num_pixels = width * height;
    size_t group_width = atoi(argv[4]);
    size_t group_height = atoi(argv[5]);
    bool test = false;
    if (*argv[6] == 't') {
        test = true;
    }

    // Setup buffers
    pixel* h_screen_buff;
    pixel* d_screen_buff;
    check_result(hipHostMalloc(&h_screen_buff, num_pixels * sizeof(pixel)));
    check_result(hipMalloc(&d_screen_buff, num_pixels * sizeof(pixel)));

    dim3 block_dim(width / group_width, height / group_height);
    dim3 group_dim(group_width, group_height);

    // Execute on device
    clock_t t_start = clock();
    if(!test) printf("Starting kernel execution...\n");
    hipLaunchKernelGGL(( d_main), dim3(block_dim), dim3(group_dim), 0, 0, d_screen_buff, width, height);
    if(!test) printf("Kernel execution ended.\n");
    if(!test) printf("Reading screen buffer from device...\n");
    check_result(hipMemcpy(h_screen_buff, d_screen_buff, num_pixels * sizeof(pixel), hipMemcpyDeviceToHost));
    if(!test) printf("Done.\n");
    printf("Time taken (ms): %i\n", (int) ((double) (clock() - t_start) / CLOCKS_PER_SEC * 1000.0f));
    if(!test){
        printf("Writing to file...\n");
        write_image(file_name, h_screen_buff, width, height);
        printf("Done\n");
    }
    //for(size_t y = 0;y < height;y++) {
    //    for(size_t x = 0;x < width;x++) {
    //        printf("%i ", (int) h_screen_buff[y * width + x].x);
    //    }
    //    printf("\n");
    //}
    hipHostFree(h_screen_buff);
    hipFree(d_screen_buff);
    return 0;
}
70b8832b639448e28932255ad5e220bdfb58153d.cu
#include <stdio.h>
#include <fstream>
#include <time.h>

// Generic utils
typedef float3 pixel;

void check_result(cudaError_t value) {
    cudaError_t status = value;
    if (status != cudaSuccess) {
        printf("Error %s at line %d in file %s\n", cudaGetErrorString(status), __LINE__, __FILE__);
        // exit(1);
    }
}

__device__ float3 operator+(const float3 &a, const float3 &b) {
    return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}

__device__ float3 operator*(const float3 &a, const float &b) {
    return make_float3(a.x * b, a.y * b, a.z * b);
}

__device__ float length(const float3 &vec) {
    return sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
}

__device__ float3 normalize(const float3 vec) {
    float inverted_len = 1.0f / length(vec);
    return vec * inverted_len;
}

// Raymarcher
typedef struct {
    float3 o;
    float3 d;
} ray;

__device__ ray get_ray(const float& u, const float& v) {
    ray r;
    r.o = make_float3(-5.0, 0.0, 0.0);
    r.d = normalize(make_float3(1.0, u, v));
    return r;
}

__device__ float mandelbulb_de(float3 pos) {
    // pos = fmod(fabs(pos), 4.0) - 2.0;
    float3 z = pos;
    float dr = 1.0;
    float r = 0.0;
    int Iterations = 4;
    float Bailout = 4.0;
    float Power = 16.0;
    for(int i = 0; i < Iterations; i++) {
        r = length(z);
        if (r > Bailout) break;
        // convert to polar coordinates
        float theta = acos(z.z / r);
        float phi = atan2(z.y, z.x);
        dr = powf(r, Power - 1.0) * Power * dr + 1.0;
        // scale and rotate the point
        float zr = pow(r, Power);
        theta = theta * Power;
        phi = phi * Power;
        // convert back to cartesian coordinates
        z = make_float3(sin(theta) * cos(phi), sin(phi) * sin(theta), cos(theta)) * zr;
        z = z + pos;
        //z += pos * cos(time * 2.0);
    }
    return 0.5 * log(r) * r / dr;
}

__device__ float march(ray r) {
    float total_dist = 0.0;
    int max_ray_steps = 64;
    float min_distance = 0.002;
    int steps;
    for (steps = 0; steps < max_ray_steps; ++steps) {
        float3 p = r.o + r.d * total_dist;
        float distance = mandelbulb_de(p);
        total_dist += distance;
        if (distance < min_distance) break;
    }
    return 1.0 - (float) steps / (float) max_ray_steps;
}

// Main kernel
__global__ void d_main(
    pixel* screen_buffer,
    const size_t width,
    const size_t height
) {
    size_t x = (blockIdx.x * blockDim.x) + threadIdx.x;
    size_t y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if(x < width && y < height) {
        float min_w_h = (float) min(width, height);
        float ar = (float) width / (float) height;
        float u = (float) x / min_w_h - ar * 0.5f;
        float v = (float) y / min_w_h - 0.5f;
        ray r = get_ray(u, v);
        float c = march(r) * 255.0f;
        float3 color = make_float3(c, c, c);
        screen_buffer[y * width + x] = color;
    }
}

void write_image(
    char* file_name,
    pixel* screen_buff,
    size_t width,
    size_t height
) {
    FILE* image = fopen(file_name, "w");
    fprintf(image, "P3\n");
    fprintf(image, "%i %i\n", (int) width, (int) height);
    fprintf(image, "%i\n", 255);
    for (size_t y = 0; y < height; y++) {
        for (size_t x = 0; x < width; x++) {
            float3 pixel = screen_buff[y * width + x];
            fprintf(image, "%i %i %i\n", (int) pixel.x, (int) pixel.y, (int) pixel.z);
        }
    }
    fclose(image);
}

int main(int argc, char** argv) {
    // printf("Mandelbulb\n");
    if(argc < 7) {
        printf("Not enough params.\n");
        return 1;
    }
    char* file_name = argv[1];
    size_t width = atoi(argv[2]);
    size_t height = atoi(argv[3]);
    size_t num_pixels = width * height;
    size_t group_width = atoi(argv[4]);
    size_t group_height = atoi(argv[5]);
    bool test = false;
    if (*argv[6] == 't') {
        test = true;
    }

    // Setup buffers
    pixel* h_screen_buff;
    pixel* d_screen_buff;
    check_result(cudaMallocHost(&h_screen_buff, num_pixels * sizeof(pixel)));
    check_result(cudaMalloc(&d_screen_buff, num_pixels * sizeof(pixel)));

    dim3 block_dim(width / group_width, height / group_height);
    dim3 group_dim(group_width, group_height);

    // Execute on device
    clock_t t_start = clock();
    if(!test) printf("Starting kernel execution...\n");
    d_main<<<block_dim, group_dim>>>(d_screen_buff, width, height);
    if(!test) printf("Kernel execution ended.\n");
    if(!test) printf("Reading screen buffer from device...\n");
    check_result(cudaMemcpy(h_screen_buff, d_screen_buff, num_pixels * sizeof(pixel), cudaMemcpyDeviceToHost));
    if(!test) printf("Done.\n");
    printf("Time taken (ms): %i\n", (int) ((double) (clock() - t_start) / CLOCKS_PER_SEC * 1000.0f));
    if(!test){
        printf("Writing to file...\n");
        write_image(file_name, h_screen_buff, width, height);
        printf("Done\n");
    }
    //for(size_t y = 0;y < height;y++) {
    //    for(size_t x = 0;x < width;x++) {
    //        printf("%i ", (int) h_screen_buff[y * width + x].x);
    //    }
    //    printf("\n");
    //}
    cudaFreeHost(h_screen_buff);
    cudaFree(d_screen_buff);
    return 0;
}
8a6c1d0a44e270b26fc3527e29fedd0d29049228.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file VL_3D_cuda.cu * \brief Definitions of the cuda 3D VL algorithm functions. */ #ifdef CUDA #ifdef VL #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"VL_3D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_3D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_3D.h" #include"io.h" __global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields); Real VL_Algorithm_3D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for the subgrid blocks sub_dimensions_3D(nx, ny, nz, n_ghost, &nx_s, &ny_s, &nz_s, &block1_tot, &block2_tot, &block3_tot, &remainder1, &remainder2, &remainder3, n_fields); //printf("Subgrid dimensions set: %d %d %d %d %d %d %d %d %d\n", nx_s, ny_s, nz_s, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3); //fflush(stdout); block_tot = block1_tot*block2_tot*block3_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / TPB; #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ){ // allocate buffer to copy conserved variable blocks to/from if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate memory on the GPU CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&dev_conserved_half, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Lz, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Rz, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( 
hipMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_z, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. // If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x, y, and z offsets of this subgrid block get_offsets_3D(nx_s, ny_s, nz_s, n_ghost, x_off, y_off, z_off, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, &x_off_s, &y_off_s, &z_off_s); // copy the conserved variables onto the GPU CudaSafeCall( hipMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyHostToDevice) ); // Step 1: Use PCM reconstruction to put primitive variables into interface arrays hipLaunchKernelGGL(( PCM_Reconstruction_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); CudaCheckError(); // Step 2: Calculate first-order upwind fluxes #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //EXACT #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //ROE CudaCheckError(); #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //HLLC CudaCheckError(); // Step 3: Update the conserved variables half a timestep hipLaunchKernelGGL(( Update_Conserved_Variables_3D_half), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, dev_conserved_half, F_x, F_y, F_z, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, 0.5*dt, gama, n_fields); CudaCheckError(); // Step 4: Construct left and right interface values using updated conserved variables #ifdef PCM hipLaunchKernelGGL(( 
PCM_Reconstruction_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); #endif #ifdef PLMP hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); hipLaunchKernelGGL(( PLMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PLMP #ifdef PLMC hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); hipLaunchKernelGGL(( PLMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif #ifdef PPMP hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); hipLaunchKernelGGL(( PPMP_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PPMP #ifdef PPMC hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); hipLaunchKernelGGL(( PPMC_cuda), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PPMC CudaCheckError(); // Step 5: Calculate the fluxes again #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //EXACT #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //ROE #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); 
hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //HLLC CudaCheckError(); #ifdef DE // Compute the divergence of Vel before updating the conserved array, this solves syncronization issues when adding this term on Update_Conserved_Variables_3D hipLaunchKernelGGL(( Partial_Update_Advected_Internal_Energy_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, gama, n_fields ); #endif // Step 6: Update the conserved variable array hipLaunchKernelGGL(( Update_Conserved_Variables_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, F_x, F_y, F_z, nx_s, ny_s, nz_s, x_off_s, y_off_s, z_off_s, n_ghost, dx, dy, dz, xbound, ybound, zbound, dt, gama, n_fields); CudaCheckError(); #ifdef DE hipLaunchKernelGGL(( Select_Internal_Energy_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields); hipLaunchKernelGGL(( Sync_Energies_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU hipLaunchKernelGGL(( cooling_kernel), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 7: Calculate the next time step hipLaunchKernelGGL(( Calc_dt_3D), dim3(dim1dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dev_dti_array, gama); CudaCheckError(); // copy the updated conserved variable array back to the CPU CudaSafeCall( hipMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyDeviceToHost) ); // copy the updated conserved variable array from the buffer into the host_conserved array on the CPU host_return_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // find maximum inverse timestep from CFL condition for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // find maximum inverse timestep from cooling time for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } if (min_dt < C_cfl/max_dti) { max_dti = C_cfl/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. 
Free_Memory_VL_3D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_VL_3D(){ // free CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory hipFree(dev_conserved); hipFree(dev_conserved_half); hipFree(Q_Lx); hipFree(Q_Rx); hipFree(Q_Ly); hipFree(Q_Ry); hipFree(Q_Lz); hipFree(Q_Rz); hipFree(F_x); hipFree(F_y); hipFree(F_z); hipFree(dev_dti_array); #ifdef COOLING_GPU hipFree(dev_dt_array); #endif } __global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields) { Real dtodx = dt/dx; Real dtody = dt/dy; Real dtodz = dt/dz; int n_cells = nx*ny*nz; // get a global thread ID int tid = threadIdx.x + blockIdx.x * blockDim.x; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; int id = xid + yid*nx + zid*nx*ny; int imo = xid-1 + yid*nx + zid*nx*ny; int jmo = xid + (yid-1)*nx + zid*nx*ny; int kmo = xid + yid*nx + (zid-1)*nx*ny; #ifdef DE Real d, d_inv, vx, vy, vz; Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P; int ipo, jpo, kpo; #endif // threads corresponding to all cells except outer ring of ghost cells do the calculation if (xid > 0 && xid < nx-1 && yid > 0 && yid < ny-1 && zid > 0 && zid < nz-1) { #ifdef DE d = dev_conserved[ id]; d_inv = 1.0 / d; vx = dev_conserved[1*n_cells + id] * d_inv; vy = dev_conserved[2*n_cells + id] * d_inv; vz = dev_conserved[3*n_cells + id] * d_inv; P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0); //if (d < 0.0 || d != d) printf("Negative density before half step update.\n"); //if (P < 0.0) printf("%d Negative pressure before half step update.\n", id); ipo = xid+1 + yid*nx + zid*nx*ny; jpo = xid + (yid+1)*nx + zid*nx*ny; kpo = xid + yid*nx + (zid+1)*nx*ny; vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo]; vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo]; vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo]; vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo]; vz_kmo = dev_conserved[3*n_cells + kmo] / dev_conserved[kmo]; vz_kpo = dev_conserved[3*n_cells + kpo] / dev_conserved[kpo]; #endif // update the conserved variable array dev_conserved_half[ id] = dev_conserved[ id] + dtodx * (dev_F_x[ imo] - dev_F_x[ id]) + dtody * (dev_F_y[ jmo] - dev_F_y[ id]) + dtodz * (dev_F_z[ kmo] - dev_F_z[ id]); dev_conserved_half[ n_cells + id] = dev_conserved[ n_cells + id] + dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]) + dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]) + dtodz * (dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]); dev_conserved_half[2*n_cells + id] = dev_conserved[2*n_cells + id] + dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]) + dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]) + dtodz * (dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]); dev_conserved_half[3*n_cells + id] = dev_conserved[3*n_cells + id] + dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]) + dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]) + dtodz * (dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]); dev_conserved_half[4*n_cells + id] = dev_conserved[4*n_cells + id] + dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]) + dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]) + dtodz * 
(dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_conserved_half[(5+i)*n_cells + id] = dev_conserved[(5+i)*n_cells + id] + dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]) + dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]) + dtodz * (dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]); } #endif #ifdef DE dev_conserved_half[(n_fields-1)*n_cells + id] = dev_conserved[(n_fields-1)*n_cells + id] + dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]) + dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]) + dtodz * (dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id]) + 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo) + dtodz*(vz_kmo-vz_kpo)); #endif //if (dev_conserved_half[id] < 0.0 || dev_conserved_half[id] != dev_conserved_half[id] || dev_conserved_half[4*n_cells+id] < 0.0 || dev_conserved_half[4*n_cells+id] != dev_conserved_half[4*n_cells+id]) { //printf("%3d %3d %3d Thread crashed in half step update. d: %e E: %e\n", xid, yid, zid, dev_conserved_half[id], dev_conserved_half[4*n_cells+id]); //} } } #endif //VL #endif //CUDA
8a6c1d0a44e270b26fc3527e29fedd0d29049228.cu
/*! \file VL_3D_cuda.cu * \brief Definitions of the cuda 3D VL algorithm functions. */ #ifdef CUDA #ifdef VL #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"VL_3D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_3D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_3D.h" #include"io.h" __global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields); Real VL_Algorithm_3D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for the subgrid blocks sub_dimensions_3D(nx, ny, nz, n_ghost, &nx_s, &ny_s, &nz_s, &block1_tot, &block2_tot, &block3_tot, &remainder1, &remainder2, &remainder3, n_fields); //printf("Subgrid dimensions set: %d %d %d %d %d %d %d %d %d\n", nx_s, ny_s, nz_s, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3); //fflush(stdout); block_tot = block1_tot*block2_tot*block3_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / TPB; #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ){ // allocate buffer to copy conserved variable blocks to/from if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate memory on the GPU CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&dev_conserved_half, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Lz, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Rz, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( 
cudaMalloc((void**)&F_z, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. // If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x, y, and z offsets of this subgrid block get_offsets_3D(nx_s, ny_s, nz_s, n_ghost, x_off, y_off, z_off, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, &x_off_s, &y_off_s, &z_off_s); // copy the conserved variables onto the GPU CudaSafeCall( cudaMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyHostToDevice) ); // Step 1: Use PCM reconstruction to put primitive variables into interface arrays PCM_Reconstruction_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); CudaCheckError(); // Step 2: Calculate first-order upwind fluxes #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //EXACT #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //ROE CudaCheckError(); #ifdef HLLC Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //HLLC CudaCheckError(); // Step 3: Update the conserved variables half a timestep Update_Conserved_Variables_3D_half<<<dim1dGrid,dim1dBlock>>>(dev_conserved, dev_conserved_half, F_x, F_y, F_z, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, 0.5*dt, gama, n_fields); CudaCheckError(); // Step 4: Construct left and right interface values using updated conserved variables #ifdef PCM PCM_Reconstruction_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); #endif #ifdef PLMP PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); PLMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, 
nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PLMP #ifdef PLMC PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); PLMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif #ifdef PPMP PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); PPMP_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PPMP #ifdef PPMC PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); PPMC_cuda<<<dim1dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dz, dt, gama, 2, n_fields); #endif //PPMC CudaCheckError(); // Step 5: Calculate the fluxes again #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //EXACT #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //ROE #ifdef HLLC Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim1dGrid,dim1dBlock>>>(Q_Lz, Q_Rz, F_z, nx_s, ny_s, nz_s, n_ghost, gama, 2, n_fields); #endif //HLLC CudaCheckError(); #ifdef DE // Compute the divergence of Vel before updating the conserved array, this solves syncronization issues when adding this term on Update_Conserved_Variables_3D Partial_Update_Advected_Internal_Energy_3D<<<dim1dGrid,dim1dBlock>>>( dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dt, gama, n_fields ); #endif // Step 6: Update the conserved variable array Update_Conserved_Variables_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, F_x, F_y, F_z, nx_s, ny_s, nz_s, x_off_s, y_off_s, z_off_s, n_ghost, dx, dy, dz, xbound, ybound, zbound, dt, gama, n_fields); CudaCheckError(); #ifdef DE Select_Internal_Energy_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields); Sync_Energies_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU cooling_kernel<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 7: Calculate the next time step 
Calc_dt_3D<<<dim1dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, dx, dy, dz, dev_dti_array, gama); CudaCheckError(); // copy the updated conserved variable array back to the CPU CudaSafeCall( cudaMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyDeviceToHost) ); // copy the updated conserved variable array from the buffer into the host_conserved array on the CPU host_return_block_3D(nx, ny, nz, nx_s, ny_s, nz_s, n_ghost, block, block1_tot, block2_tot, block3_tot, remainder1, remainder2, remainder3, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // find maximum inverse timestep from CFL condition for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // find maximum inverse timestep from cooling time for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } if (min_dt < C_cfl/max_dti) { max_dti = C_cfl/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. Free_Memory_VL_3D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_VL_3D(){ // free CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory cudaFree(dev_conserved); cudaFree(dev_conserved_half); cudaFree(Q_Lx); cudaFree(Q_Rx); cudaFree(Q_Ly); cudaFree(Q_Ry); cudaFree(Q_Lz); cudaFree(Q_Rz); cudaFree(F_x); cudaFree(F_y); cudaFree(F_z); cudaFree(dev_dti_array); #ifdef COOLING_GPU cudaFree(dev_dt_array); #endif } __global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields) { Real dtodx = dt/dx; Real dtody = dt/dy; Real dtodz = dt/dz; int n_cells = nx*ny*nz; // get a global thread ID int tid = threadIdx.x + blockIdx.x * blockDim.x; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; int id = xid + yid*nx + zid*nx*ny; int imo = xid-1 + yid*nx + zid*nx*ny; int jmo = xid + (yid-1)*nx + zid*nx*ny; int kmo = xid + yid*nx + (zid-1)*nx*ny; #ifdef DE Real d, d_inv, vx, vy, vz; Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P; int ipo, jpo, kpo; #endif // threads corresponding to all cells except outer ring of ghost cells do the calculation if (xid > 0 && xid < nx-1 && yid > 0 && yid < ny-1 && zid > 0 && zid < nz-1) { #ifdef DE d = dev_conserved[ id]; d_inv = 1.0 / d; vx = dev_conserved[1*n_cells + id] * d_inv; vy = dev_conserved[2*n_cells + id] * d_inv; vz = dev_conserved[3*n_cells + id] * d_inv; P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0); //if (d < 0.0 || d != d) printf("Negative density before half step update.\n"); //if (P < 0.0) printf("%d Negative pressure before half step update.\n", id); ipo = xid+1 + yid*nx + zid*nx*ny; jpo = xid + (yid+1)*nx + zid*nx*ny; kpo = xid + yid*nx + (zid+1)*nx*ny; vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo]; vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo]; vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo]; vy_jpo = 
dev_conserved[2*n_cells + jpo] / dev_conserved[jpo]; vz_kmo = dev_conserved[3*n_cells + kmo] / dev_conserved[kmo]; vz_kpo = dev_conserved[3*n_cells + kpo] / dev_conserved[kpo]; #endif // update the conserved variable array dev_conserved_half[ id] = dev_conserved[ id] + dtodx * (dev_F_x[ imo] - dev_F_x[ id]) + dtody * (dev_F_y[ jmo] - dev_F_y[ id]) + dtodz * (dev_F_z[ kmo] - dev_F_z[ id]); dev_conserved_half[ n_cells + id] = dev_conserved[ n_cells + id] + dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]) + dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]) + dtodz * (dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]); dev_conserved_half[2*n_cells + id] = dev_conserved[2*n_cells + id] + dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]) + dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]) + dtodz * (dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]); dev_conserved_half[3*n_cells + id] = dev_conserved[3*n_cells + id] + dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]) + dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]) + dtodz * (dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]); dev_conserved_half[4*n_cells + id] = dev_conserved[4*n_cells + id] + dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]) + dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]) + dtodz * (dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_conserved_half[(5+i)*n_cells + id] = dev_conserved[(5+i)*n_cells + id] + dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]) + dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]) + dtodz * (dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]); } #endif #ifdef DE dev_conserved_half[(n_fields-1)*n_cells + id] = dev_conserved[(n_fields-1)*n_cells + id] + dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]) + dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]) + dtodz * (dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id]) + 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo) + dtodz*(vz_kmo-vz_kpo)); #endif //if (dev_conserved_half[id] < 0.0 || dev_conserved_half[id] != dev_conserved_half[id] || dev_conserved_half[4*n_cells+id] < 0.0 || dev_conserved_half[4*n_cells+id] != dev_conserved_half[4*n_cells+id]) { //printf("%3d %3d %3d Thread crashed in half step update. d: %e E: %e\n", xid, yid, zid, dev_conserved_half[id], dev_conserved_half[4*n_cells+id]); //} } } #endif //VL #endif //CUDA
99a308db8d35abb02c20db272f529f105b6337f7.hip
// !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
        const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
        const ConvParam& param, float alpha, float beta, hipStream_t stream);
99a308db8d35abb02c20db272f529f105b6337f7.cu
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
        const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
        const ConvParam& param, float alpha, float beta, cudaStream_t stream);
e04916baff2ca8906aff462b96b87dd71a9f24c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <hip/hip_fp16.h> #include "cuda_math.h" //#include "cuda_math_double.h" #include <stdio.h> #include <stdlib.h> #include <errno.h> #include "structs.cuh" #include "im2D.h" #include "im3D.hpp" int type_diag_flag=0; im3D_pars im3DHost; LBMParamsHost parsHost; __constant__ LBMParams pars; __constant__ int3 e[Qn]; __constant__ ftype3 ef[Qn]; __constant__ ftype w[Qn]; __constant__ int reverseX[Qn]; __constant__ int reverseY[Qn]; __constant__ int reverseZ[Qn]; __constant__ int reverseXYZ[Qn]; const char* FuncStr[] = {"Rho","Velocity","Mach","T","M4mom","Iterations","f0","f1","f2","f3","f4","f5","f6","f7","f8"}; //const char* FuncStr[] = {"F","G"}; __device__ float pow2(float v) { return v*v; } __global__ void lbm_draw_surf(float* buf){ } __global__ void lbm_draw(float* buf) { for(int ix=threadIdx.x; ix<pars.Nx; ix+=pars.Nx/4) { int iz=blockIdx.x; int iy=blockIdx.y; int xbuf=ix; int ybuf=iy; float* pbuf=&buf[xbuf+pars.Nx*(ybuf+pars.Ny*iz)]; ftype f[Qn]; Cell cell; const int2 gcind = Group::ind_conv(ix, iy, iz); const int gindex=gcind.x, cindex=gcind.y; pars.loadGroups[gindex].unpack(cell, cindex); for(int i=0; i<Qn; i++) f[i] = cell.f[i]; ftype rho=cell.rho; ftype4 uT=cell.uT; uT/=pars.dt; uT.w/=pars.dt; ftype M4mom=0; for(int i=0; i<Qn; i++) { ftype viX = sqrt(uT.w/TLat)*ef[i].x+uT.x; M4mom+= f[i]*viX*viX*viX*viX; } Cinfo& cinf = pars.cinfo[Cinfo::ind_zip(ix,iy,iz)]; int niter=cinf.niter; if(cinf.set==0) { *pbuf=0; continue; } switch(pars.nFunc) { case 0: *pbuf=cell.rho; break; //case 1: *pbuf=length(make_ftype3(cell.uT.x,cell.uT.y,cell.uT.z)); break; case 1: *pbuf=uT.x; break; case 2: *pbuf=uT.x/sqrt(uT.w); break; case 3: *pbuf=uT.w; break; case 4: *pbuf=M4mom; break; case 5: *pbuf=float(niter); break; case 6: *pbuf=f[0]; break; case 7: *pbuf=f[1]; break; case 8: *pbuf=f[2]; break; case 9: *pbuf=f[3]; break; case 10: *pbuf=f[4]; break; case 11: *pbuf=f[5]; break; case 12: *pbuf=f[6]; break; case 13: *pbuf=f[7]; break; case 14: *pbuf=f[8]; break; } } } void draw_all(){ hipLaunchKernelGGL(( lbm_draw), dim3(dim3(parsHost.Nz,parsHost.Ny)),dim3(parsHost.Nx/4), 0, 0, parsHost.arr4im.Arr3Dbuf); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); im3DHost.initCuda(parsHost.arr4im); } void init(); void drop(std::string* fprefix); void calcStep(); void idle_func_calc::step() { for(int i=0;i<1; i++) calcStep(); im3DHost.save_png(parsHost.iStep); draw_all(); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); recalc_at_once=true; } static void key_func(unsigned char key, int x, int y) { if(type_diag_flag>=2) printf("keyN=%d, coors=(%d,%d)\n", key, x, y); if(key == 'h') { printf("\ ======= mxw3D:\n\ <> \t : WEHSxEzEyExHxHyHzSySzeps\n\ Enter\t \n\ b \t (. 
)\n\ "); im3DHost.print_help(); return; } ftype t0; switch(key) { //case '>': if(parsHost.nFunc<parsHost.MaxFunc) parsHost.nFunc++; break; //case '<': if(parsHost.nFunc>0) parsHost.nFunc--; break; case '>': parsHost.nFunc = (parsHost.nFunc+1)%parsHost.MaxFunc; break; case '<': parsHost.nFunc = (parsHost.nFunc+parsHost.MaxFunc-1)%parsHost.MaxFunc; break; case 13: for(int i=0;i<1; i++) calcStep(); break; default: if(!im3DHost.key_func(key, x, y)) { if(type_diag_flag>=0) printf(" %d (%d,%d) \n", key, x, y); } return; } copy2dev( parsHost, pars ); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); draw_all(); recalc_at_once=true; } static void draw_func() { im3DHost.fName = FuncStr[parsHost.nFunc]; glutPostRedisplay(); im2D.draw(im3DHost.reset_title()); } //void (*idle_func_ptr)(float* ); static void idle_func() { im3DHost.recalc_func(); } static void mouse_func(int button, int state, int x, int y) { im3DHost.mouse_func(button, state, x, y); } static void motion_func(int x, int y) { im3DHost.motion_func(x, y); } static void special_func(int key, int x, int y) { im3DHost.special_func(key, x, y); } bool interactive=true, test_only=false; //bool help_only=false, test_only=false; void LBMParamsHost::set(){ nFunc = 0; MaxFunc = sizeof(FuncStr)/sizeof(char*); Nx=(1<<Rank)*brick.x; //128; Ny=(1<<Rank)*brick.y; //128; Nz=(1<<Rank)*brick.z; //128; Nx=LSizeX*brick.x; Ny=LSizeY*brick.y; Nz=LSizeZ*brick.z; if(!test_only) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindFloat); CHECK_ERROR( hipGetLastError() ); CHECK_ERROR( hipMalloc3DArray(&data, &channelDesc, make_hipExtent(Nx*No,Ny,Nz), hipArraySurfaceLoadStore) ); struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = data; texSdata=0; CHECK_ERROR( hipCreateSurfaceObject(&texSdata, &resDesc) ); channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); CHECK_ERROR( hipGetLastError() ); CHECK_ERROR( hipMalloc3DArray(&fdata, &channelDesc, make_hipExtent(Nx,Ny,Nz), hipArraySurfaceLoadStore) ); CHECK_ERROR( hipMalloc3DArray(&gdata, &channelDesc, make_hipExtent(Nx,Ny,Nz), hipArraySurfaceLoadStore) ); memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; texFdata=0; texGdata=0; resDesc.res.array.array = fdata; CHECK_ERROR( hipCreateSurfaceObject(&texFdata, &resDesc) ); resDesc.res.array.array = gdata; CHECK_ERROR( hipCreateSurfaceObject(&texGdata, &resDesc) ); } CHECK_ERROR(hipMallocManaged((void**)&NiterMax,sizeof(int ))); CHECK_ERROR(hipMemset(NiterMax, 0, sizeof(int ))); CHECK_ERROR(hipMallocManaged((void**)&mass ,sizeof(ftype ))); CHECK_ERROR(hipMemset(mass , 0, sizeof(ftype))); CHECK_ERROR(hipMallocManaged((void**)&enrg ,sizeof(ftype ))); CHECK_ERROR(hipMemset(enrg , 0, sizeof(ftype))); CHECK_ERROR(hipMallocManaged((void**)&moment ,sizeof(ftype3))); CHECK_ERROR(hipMemset(moment , 0, sizeof(ftype3))); dt=1; pc.set(); } int print_help() { printf("help | using in test|batch mode:\n ./lbm [--help|--test|--batch]\n"); printf("using in interactive mode:\n ./lbm %s\n", im3DHost.command_line_help_string()); im3DHost.print_command_line_help(); return 0; } void reset(im3D_pars* p=0); int maxR; int _main(int argc, char** argv) { maxR=1;//atoi(argv[1]); ::reset(); argv ++; argc --; im3DHost.reset(); while(argc>0 && strncmp(*argv,"--",2)==0) { int pp=1; if(strcmp(*argv,"--test")==0) test_only = true; else if(strcmp(*argv,"--batch")==0) interactive = false; else pp = 
im3DHost.init_from_command_line(argv); if(pp<=0) return print_help(); else if(pp==1) printf("par: %s; \n", argv[0]); else if(pp==2) printf("par: %s; vals: %s\n", argv[0], argv[1]); argv += pp; argc -= pp; }; if(test_only) printf("No GL\n"); else printf("With GL\n"); im2D.get_device(3,0); type_diag_flag = 1; try { if(type_diag_flag>=1) printf(" \n"); cudaTimer tm; tm.start(); copy2dev( e_host[0], e ); copy2dev( w_host[0], w ); #if defined D1Q3 || defined D3Q27 #else copy2dev( ef_host[0], ef ); #endif #ifdef D3Q125 int reverseX_correct[Qn], reverseY_correct[Qn], reverseZ_correct[Qn]; for(int i=0; i<Qn; i++) { reverseX_correct[i] = reverseX_host[i]; reverseY_correct[i] = reverseY_host[i]; reverseZ_correct[i] = reverseZ_host[i]; } for(int i=0; i<Qn; i++) { if(ef_host[i].x!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(-1,1,1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseX_correct[i] = j; } if(ef_host[i].y!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(1,-1,1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseY_correct[i] = j; } if(ef_host[i].z!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(1,1,-1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseZ_correct[i] = j; } } copy2dev( reverseX_correct[0], reverseX ); copy2dev( reverseY_correct[0], reverseY ); copy2dev( reverseZ_correct[0], reverseZ ); #else copy2dev( reverseX_host[0], reverseX ); copy2dev( reverseY_host[0], reverseY ); copy2dev( reverseZ_host[0], reverseZ ); #endif copy2dev( reverseXYZ_host[0], reverseXYZ ); parsHost.set(); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); copy2dev( parsHost, pars ); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); init(); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); copy2dev( parsHost, pars ); hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() ); if(test_only) { // while(parsHost.iStep<1500*4*8) { while(parsHost.iStep<1) { const int Nx=parsHost.Nx, Ny=parsHost.Ny, Nz=parsHost.Nz; tm.start(); calcStep(); double tCpu=tm.stop(); printf("run time: %.2f msec, %.2f MLU/sec\n", tCpu, 1.e-6*Nx*Ny*Nz/tCpu); // break; } return 0; } tm.start(); parsHost.reset_im(); im3DHost.reset(parsHost.arr4im); copy2dev( parsHost, pars ); //CHECK_ERROR( hipMemset(parsHost.arr4im.Arr3Dbuf,0,((long long int)Nx)*Ny*Nz*sizeof(ftype)) ); im2D.get_device(3,0); im2D.init_image(argc,argv, im3DHost.bNx, im3DHost.bNy, "im3D"); im3DHost.init3D(parsHost.arr4im); im3DHost.iz0=parsHost.Nx-1; im3DHost.key_func('b',0,0); im3DHost.initCuda(parsHost.arr4im); draw_all(); if(type_diag_flag>=1) printf(" GLUT \n"); glutIdleFunc(idle_func); glutKeyboardFunc(key_func); glutMouseFunc(mouse_func); glutMotionFunc(motion_func); glutDisplayFunc(draw_func); glutSpecialFunc(special_func); if(type_diag_flag>=0) printf("Init cuda device: %.1f msec\n", tm.stop()); glutMainLoop(); } catch(...) 
{ printf(" - .\n"); } parsHost.clear(); return -1; } int main(int argc, char** argv) { return _main(argc,argv); } int run(int argc, char** argv) { return _main(argc,argv); } float get_val_from_arr3D(int ix, int iy, int iz) { Arr3D_pars& arr=parsHost.arr4im; if(arr.inCPUmem) return arr.Arr3Dbuf[arr.get_ind(ix,iy,iz)]; float res=0.0; if(arr.inGPUmem) CHECK_ERROR(hipMemcpy(&res, arr.get_ptr(ix,iy,iz), sizeof(float), hipMemcpyDeviceToHost)); return res; }
e04916baff2ca8906aff462b96b87dd71a9f24c5.cu
//#include <cuda_fp16.h> #include "cuda_math.h" //#include "cuda_math_double.h" #include <stdio.h> #include <stdlib.h> #include <errno.h> #include "structs.cuh" #include "im2D.h" #include "im3D.hpp" int type_diag_flag=0; im3D_pars im3DHost; LBMParamsHost parsHost; __constant__ LBMParams pars; __constant__ int3 e[Qn]; __constant__ ftype3 ef[Qn]; __constant__ ftype w[Qn]; __constant__ int reverseX[Qn]; __constant__ int reverseY[Qn]; __constant__ int reverseZ[Qn]; __constant__ int reverseXYZ[Qn]; const char* FuncStr[] = {"Rho","Velocity","Mach","T","M4mom","Iterations","f0","f1","f2","f3","f4","f5","f6","f7","f8"}; //const char* FuncStr[] = {"F","G"}; __device__ float pow2(float v) { return v*v; } __global__ void lbm_draw_surf(float* buf){ } __global__ void lbm_draw(float* buf) { for(int ix=threadIdx.x; ix<pars.Nx; ix+=pars.Nx/4) { int iz=blockIdx.x; int iy=blockIdx.y; int xbuf=ix; int ybuf=iy; float* pbuf=&buf[xbuf+pars.Nx*(ybuf+pars.Ny*iz)]; ftype f[Qn]; Cell cell; const int2 gcind = Group::ind_conv(ix, iy, iz); const int gindex=gcind.x, cindex=gcind.y; pars.loadGroups[gindex].unpack(cell, cindex); for(int i=0; i<Qn; i++) f[i] = cell.f[i]; ftype rho=cell.rho; ftype4 uT=cell.uT; uT/=pars.dt; uT.w/=pars.dt; ftype M4mom=0; for(int i=0; i<Qn; i++) { ftype viX = sqrt(uT.w/TLat)*ef[i].x+uT.x; M4mom+= f[i]*viX*viX*viX*viX; } Cinfo& cinf = pars.cinfo[Cinfo::ind_zip(ix,iy,iz)]; int niter=cinf.niter; if(cinf.set==0) { *pbuf=0; continue; } switch(pars.nFunc) { case 0: *pbuf=cell.rho; break; //case 1: *pbuf=length(make_ftype3(cell.uT.x,cell.uT.y,cell.uT.z)); break; case 1: *pbuf=uT.x; break; case 2: *pbuf=uT.x/sqrt(uT.w); break; case 3: *pbuf=uT.w; break; case 4: *pbuf=M4mom; break; case 5: *pbuf=float(niter); break; case 6: *pbuf=f[0]; break; case 7: *pbuf=f[1]; break; case 8: *pbuf=f[2]; break; case 9: *pbuf=f[3]; break; case 10: *pbuf=f[4]; break; case 11: *pbuf=f[5]; break; case 12: *pbuf=f[6]; break; case 13: *pbuf=f[7]; break; case 14: *pbuf=f[8]; break; } } } void draw_all(){ lbm_draw<<<dim3(parsHost.Nz,parsHost.Ny),parsHost.Nx/4>>>(parsHost.arr4im.Arr3Dbuf); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); im3DHost.initCuda(parsHost.arr4im); } void init(); void drop(std::string* fprefix); void calcStep(); void idle_func_calc::step() { for(int i=0;i<1; i++) calcStep(); im3DHost.save_png(parsHost.iStep); draw_all(); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); recalc_at_once=true; } static void key_func(unsigned char key, int x, int y) { if(type_diag_flag>=2) printf("keyN=%d, coors=(%d,%d)\n", key, x, y); if(key == 'h') { printf("\ ======= Управление mxw3D:\n\ <¦> \tИзменение функции для визуализации: WEH¦Sx¦Ez¦Ey¦Ex¦Hx¦Hy¦Hz¦Sy¦Sz¦eps\n\ «Enter»\tПересчёт одного большого шага\n\ b \tвключает пересчёт в динамике (см. 
«Управление динамикой»)\n\ "); im3DHost.print_help(); return; } ftype t0; switch(key) { //case '>': if(parsHost.nFunc<parsHost.MaxFunc) parsHost.nFunc++; break; //case '<': if(parsHost.nFunc>0) parsHost.nFunc--; break; case '>': parsHost.nFunc = (parsHost.nFunc+1)%parsHost.MaxFunc; break; case '<': parsHost.nFunc = (parsHost.nFunc+parsHost.MaxFunc-1)%parsHost.MaxFunc; break; case 13: for(int i=0;i<1; i++) calcStep(); break; default: if(!im3DHost.key_func(key, x, y)) { if(type_diag_flag>=0) printf("По клавише %d в позиции (%d,%d) нет никакого действия\n", key, x, y); } return; } copy2dev( parsHost, pars ); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); draw_all(); recalc_at_once=true; } static void draw_func() { im3DHost.fName = FuncStr[parsHost.nFunc]; glutPostRedisplay(); im2D.draw(im3DHost.reset_title()); } //void (*idle_func_ptr)(float* ); static void idle_func() { im3DHost.recalc_func(); } static void mouse_func(int button, int state, int x, int y) { im3DHost.mouse_func(button, state, x, y); } static void motion_func(int x, int y) { im3DHost.motion_func(x, y); } static void special_func(int key, int x, int y) { im3DHost.special_func(key, x, y); } bool interactive=true, test_only=false; //bool help_only=false, test_only=false; void LBMParamsHost::set(){ nFunc = 0; MaxFunc = sizeof(FuncStr)/sizeof(char*); Nx=(1<<Rank)*brick.x; //128; Ny=(1<<Rank)*brick.y; //128; Nz=(1<<Rank)*brick.z; //128; Nx=LSizeX*brick.x; Ny=LSizeY*brick.y; Nz=LSizeZ*brick.z; if(!test_only) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindFloat); CHECK_ERROR( cudaGetLastError() ); CHECK_ERROR( cudaMalloc3DArray(&data, &channelDesc, make_cudaExtent(Nx*No,Ny,Nz), cudaArraySurfaceLoadStore) ); struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = data; texSdata=0; CHECK_ERROR( cudaCreateSurfaceObject(&texSdata, &resDesc) ); channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); CHECK_ERROR( cudaGetLastError() ); CHECK_ERROR( cudaMalloc3DArray(&fdata, &channelDesc, make_cudaExtent(Nx,Ny,Nz), cudaArraySurfaceLoadStore) ); CHECK_ERROR( cudaMalloc3DArray(&gdata, &channelDesc, make_cudaExtent(Nx,Ny,Nz), cudaArraySurfaceLoadStore) ); memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; texFdata=0; texGdata=0; resDesc.res.array.array = fdata; CHECK_ERROR( cudaCreateSurfaceObject(&texFdata, &resDesc) ); resDesc.res.array.array = gdata; CHECK_ERROR( cudaCreateSurfaceObject(&texGdata, &resDesc) ); } CHECK_ERROR(cudaMallocManaged((void**)&NiterMax,sizeof(int ))); CHECK_ERROR(cudaMemset(NiterMax, 0, sizeof(int ))); CHECK_ERROR(cudaMallocManaged((void**)&mass ,sizeof(ftype ))); CHECK_ERROR(cudaMemset(mass , 0, sizeof(ftype))); CHECK_ERROR(cudaMallocManaged((void**)&enrg ,sizeof(ftype ))); CHECK_ERROR(cudaMemset(enrg , 0, sizeof(ftype))); CHECK_ERROR(cudaMallocManaged((void**)&moment ,sizeof(ftype3))); CHECK_ERROR(cudaMemset(moment , 0, sizeof(ftype3))); dt=1; pc.set(); } int print_help() { printf("help | using in test|batch mode:\n ./lbm [--help|--test|--batch]\n"); printf("using in interactive mode:\n ./lbm %s\n", im3DHost.command_line_help_string()); im3DHost.print_command_line_help(); return 0; } void reset(im3D_pars* p=0); int maxR; int _main(int argc, char** argv) { maxR=1;//atoi(argv[1]); ::reset(); argv ++; argc --; im3DHost.reset(); while(argc>0 && strncmp(*argv,"--",2)==0) { int pp=1; if(strcmp(*argv,"--test")==0) test_only = true; 
else if(strcmp(*argv,"--batch")==0) interactive = false; else pp = im3DHost.init_from_command_line(argv); if(pp<=0) return print_help(); else if(pp==1) printf("par: %s; \n", argv[0]); else if(pp==2) printf("par: %s; vals: %s\n", argv[0], argv[1]); argv += pp; argc -= pp; }; if(test_only) printf("No GL\n"); else printf("With GL\n"); im2D.get_device(3,0); type_diag_flag = 1; try { if(type_diag_flag>=1) printf("Настройка опций визуализации по умолчанию\n"); cudaTimer tm; tm.start(); copy2dev( e_host[0], e ); copy2dev( w_host[0], w ); #if defined D1Q3 || defined D3Q27 #else copy2dev( ef_host[0], ef ); #endif #ifdef D3Q125 int reverseX_correct[Qn], reverseY_correct[Qn], reverseZ_correct[Qn]; for(int i=0; i<Qn; i++) { reverseX_correct[i] = reverseX_host[i]; reverseY_correct[i] = reverseY_host[i]; reverseZ_correct[i] = reverseZ_host[i]; } for(int i=0; i<Qn; i++) { if(ef_host[i].x!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(-1,1,1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseX_correct[i] = j; } if(ef_host[i].y!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(1,-1,1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseY_correct[i] = j; } if(ef_host[i].z!=0) { int j=0, found=0; while(found==0 && j<Qn) { if(ef_host[j]==ef_host[i]*make_ftype3(1,1,-1)) {found=1; break;} j++; } if(found==0) { printf("cannot find reverse index: it's strange\n"); exit(-1); } reverseZ_correct[i] = j; } } copy2dev( reverseX_correct[0], reverseX ); copy2dev( reverseY_correct[0], reverseY ); copy2dev( reverseZ_correct[0], reverseZ ); #else copy2dev( reverseX_host[0], reverseX ); copy2dev( reverseY_host[0], reverseY ); copy2dev( reverseZ_host[0], reverseZ ); #endif copy2dev( reverseXYZ_host[0], reverseXYZ ); parsHost.set(); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); copy2dev( parsHost, pars ); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); init(); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); copy2dev( parsHost, pars ); cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() ); if(test_only) { // while(parsHost.iStep<1500*4*8) { while(parsHost.iStep<1) { const int Nx=parsHost.Nx, Ny=parsHost.Ny, Nz=parsHost.Nz; tm.start(); calcStep(); double tCpu=tm.stop(); printf("run time: %.2f msec, %.2f MLU/sec\n", tCpu, 1.e-6*Nx*Ny*Nz/tCpu); // break; } return 0; } tm.start(); parsHost.reset_im(); im3DHost.reset(parsHost.arr4im); copy2dev( parsHost, pars ); //CHECK_ERROR( cudaMemset(parsHost.arr4im.Arr3Dbuf,0,((long long int)Nx)*Ny*Nz*sizeof(ftype)) ); im2D.get_device(3,0); im2D.init_image(argc,argv, im3DHost.bNx, im3DHost.bNy, "im3D"); im3DHost.init3D(parsHost.arr4im); im3DHost.iz0=parsHost.Nx-1; im3DHost.key_func('b',0,0); im3DHost.initCuda(parsHost.arr4im); draw_all(); if(type_diag_flag>=1) printf("Настройка GLUT и запуск интерфейса\n"); glutIdleFunc(idle_func); glutKeyboardFunc(key_func); glutMouseFunc(mouse_func); glutMotionFunc(motion_func); glutDisplayFunc(draw_func); glutSpecialFunc(special_func); if(type_diag_flag>=0) printf("Init cuda device: %.1f msec\n", tm.stop()); glutMainLoop(); } catch(...) 
{ printf("Возникла какая-то ошибка.\n"); } parsHost.clear(); return -1; } int main(int argc, char** argv) { return _main(argc,argv); } int run(int argc, char** argv) { return _main(argc,argv); } float get_val_from_arr3D(int ix, int iy, int iz) { Arr3D_pars& arr=parsHost.arr4im; if(arr.inCPUmem) return arr.Arr3Dbuf[arr.get_ind(ix,iy,iz)]; float res=0.0; if(arr.inGPUmem) CHECK_ERROR(cudaMemcpy(&res, arr.get_ptr(ix,iy,iz), sizeof(float), cudaMemcpyDeviceToHost)); return res; }
ef0bc1b4161b2589c5435be3abbd55e4f2afa304.hip
// !!! This is a file automatically generated by hipify!!! // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // standard imports #include <cfloat> #include <fstream> #include <iostream> #include <thread> #define TINYOBJLOADER_IMPLEMENTATION #define USE_PNG // include OpenGP and OpenGL #include <OpenGP/GL/Application.h> #include <OpenGP/GL/Components/GUICanvasComponent.h> #include <OpenGP/GL/ImguiRenderer.h> #include <OpenGP/Image/Image.h> // include Octopus // CHANGE #include "OctopusComponent.h" // TODO - do I need to modify Scene.h? #include "Scene.h" #include "CollisionGrid.cuh" #define OPENGP_IMPLEMENT_ALL_IN_THIS_FILE #include <OpenGP/util/implementations.h> // namespace OpenGP using namespace OpenGP; // main int main(int argc, char **argv) { // constant shadow size int shadow_size = 2048; // make OpenGP Application Application app; // TODO - From Scene.h? Or OpenGP? Scene scene; // make light auto &light_entity = scene.create_entity_with<CameraComponent>(); light_entity.get<TransformComponent>().set_forward( Vec3(-1, -2, 0).normalized()); light_entity.get<TransformComponent>().position = Vec3(50, 100, 0); Mat4x4 shadow_matrix = (light_entity.get_projection(shadow_size, shadow_size) * light_entity.get_view()); // make floor auto &floor_entity = scene.create_entity_with<WorldRenderComponent>(); auto &floor_renderer = floor_entity.set_renderer<SurfaceMeshRenderer>(); floor_renderer.get_gpu_mesh().set_vpoint( {Vec3(-10000, 0, -10000), Vec3(10000, 0, -10000), Vec3(-10000, 0, 10000), Vec3(10000, 0, 10000)}); floor_renderer.get_gpu_mesh().set_vnormal( {Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0)}); floor_renderer.get_gpu_mesh().set_triangles({0, 1, 2, 1, 2, 3}); // define floormat material Material floormat(R"GLSL( uniform sampler2D shadow_map; uniform vec3 light_pos; uniform mat4 shadow_matrix; uniform float shadow_near; uniform float shadow_far; vec3 world2uvdepth(vec3 pos, mat4 mat) { vec4 a = mat * vec4(pos, 1); vec3 b = a.xyz / a.w; return (b + vec3(1)) / 2; } float get_shadow_mask(vec2 uv) { return 1 - smoothstep(0.3, 0.5, length(uv - vec2(0.5, 0.5))); } vec3 get_ambient(vec3 pos) { vec3 ambient = vec3(0.14, 0.14, 0.18); vec3 uvd = world2uvdepth(pos, shadow_matrix); return ambient + vec3(0.2) * get_shadow_mask(uvd.xy); } float linear_shadow_depth(float d) { return shadow_near * shadow_far / (shadow_far + d * (shadow_near - shadow_far)); } float get_shadow(vec3 pos) { ivec2 dim = textureSize(shadow_map, 0); vec3 uvd = world2uvdepth(pos, shadow_matrix); vec2 base_coord = uvd.xy * dim; ivec2 base_coord_i = ivec2(floor(base_coord)); vec2 inter = fract(base_coord); mat4 shadow_depths; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { shadow_depths[i][j] = linear_shadow_depth(texelFetch(shadow_map, base_coord_i + ivec2(i-1, j-1), 0).r); } } float threshold = linear_shadow_depth(uvd.z) - 0.1; mat2 pcf_vals = mat2(0); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { for (int x = 0; x < 3; ++x) { for (int y = 0; y < 3; ++y) { pcf_vals[i][j] += (shadow_depths[x + 
i][y + j] < threshold) ? 0 : (1.0 / 9.0); } } } } float a = mix(pcf_vals[0][0], pcf_vals[1][0], inter.x); float b = mix(pcf_vals[0][1], pcf_vals[1][1], inter.x); return mix(a, b, inter.y) * get_shadow_mask(uvd.xy); } vec4 fragment_shade() { vec3 pos = get_position(); vec3 lightdir = normalize(light_pos - pos); vec3 white_color = vec3(1, 1, 1); vec3 black_color = vec3(0.6, 0.6, 0.6); vec3 background = (white_color + black_color) / 2; vec3 diffuse_color = white_color; vec3 modpos = mod(pos / 5, 1); if ((modpos.x < 0.5) ^^ (modpos.z < 0.5)) { diffuse_color = black_color; } float blur = exp(-2 * max(length(dFdx(pos)), length(dFdy(pos)))); blur = clamp(2 * blur, 0, 1); diffuse_color = mix(background, diffuse_color, blur); vec3 ambient = get_ambient(pos); float shadow = get_shadow(pos); vec3 out_color = shadow * 0.85 * clamp(dot(get_normal(), normalize(lightdir)), 0, 1) * diffuse_color; out_color += ambient * diffuse_color; return vec4(out_color, 1); } )GLSL"); floormat.set_property("ao_map", 6); floormat.set_property("shadow_map", 7); floormat.set_property("shadow_matrix", shadow_matrix); floormat.set_property("light_pos", light_entity.get<TransformComponent>().position); floormat.set_property("shadow_near", light_entity.near_plane); floormat.set_property("shadow_far", light_entity.far_plane); floor_renderer.set_material(floormat); floor_renderer.rebuild(); // make viper scene viper::Scene sim_scene; // Set OctopusComponent property to current scene OctopusComponent::v_scene = &sim_scene; // TODO - what is octoswarm?? // CHANGE auto &octoswarm = scene.create_entity_with<OctopusComponent>(); octoswarm.renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.renderer->get_material().set_property("shadow_near", light_entity.near_plane); octoswarm.renderer->get_material().set_property("shadow_far", light_entity.far_plane); octoswarm.sphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.sphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.sphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.sphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.tsphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.tsphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.tsphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.tsphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.cannonball_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.cannonball_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.cannonball_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.cannonball_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.pillar_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.pillar_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.pillar_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); 
octoswarm.pillar_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); auto &c_entity = scene.create_entity_with<TrackballComponent>(); c_entity.oriented = true; // constants - window? int ww = 3840, wh = 1080; // make framebuffer Framebuffer fb, fb_shadow; RGB8Texture color_map, color_map_shadow; D32FTexture depth_map, depth_map_shadow; auto realloc = [&](int w, int h) { color_map.allocate(w, h); depth_map.allocate(w, h); }; realloc(ww, wh); depth_map_shadow.allocate(shadow_size, shadow_size); color_map_shadow.allocate(shadow_size, shadow_size); fb.attach_color_texture(color_map); fb.attach_depth_texture(depth_map); fb_shadow.attach_color_texture(color_map_shadow); fb_shadow.attach_depth_texture(depth_map_shadow); RGB8Texture colmap; Image<Eigen::Matrix<uint8_t, 3, 1>> colmap_cpu(2048, 2048); // Read in texture binary // WINGATE // TODO - change this to read in Tentacle_diffuse.png or Tentacle_convert.mtl std::ifstream("texture.bin", std::ios::binary).read( reinterpret_cast<char*>(&colmap_cpu(0, 0)), 12582912); colmap.upload(colmap_cpu); FullscreenQuad fsquad; // TODO - change defaults? Does this change the view? // No, change where says set_defaults bool show_pills = false; bool splitscreen = false; // bool splitscreen = true; // function to set pill visibility of octopi auto set_pill_visibility = [&](bool visible) { show_pills = visible; octoswarm.render_comp->visible = !visible; octoswarm.sphere_render_comp->visible = visible; octoswarm.vis_update(); }; // function to draw scene auto draw_scene = [&](int width, int height, int x, int y) { //====================================================================== // Draw shadow map fb_shadow.bind(); light_entity.draw(shadow_size, shadow_size); fb_shadow.unbind(); //====================================================================== // Draw scene with shadows fb.bind(); glActiveTexture(GL_TEXTURE5); colmap.bind(); glActiveTexture(GL_TEXTURE7); depth_map_shadow.bind(); glActiveTexture(GL_TEXTURE0); // set up camera auto &cam = c_entity.get<CameraComponent>(); cam.draw(color_map.get_width(), color_map.get_height(), 0, 0, false); // if sphere component is visible if (octoswarm.sphere_render_comp->visible) { RenderContext context; glDepthMask(GL_FALSE); context.aspect = (float)color_map.get_width() / (float)color_map.get_height(); context.vfov = cam.vfov; context.near = cam.near_plane; context.far = cam.far_plane; context.eye = cam.get<TransformComponent>().position; context.forward = cam.get<TransformComponent>().forward(); context.up = cam.get<TransformComponent>().up(); context.update_view(); context.update_projection(); auto &renderable = *octoswarm.tsphere_render_comp; auto &transform = renderable.get<TransformComponent>(); context.translation = transform.position; context.scale = transform.scale; context.rotation = transform.rotation; context.update_model(); glEnable(GL_DEPTH_TEST); renderable.get_renderer().render(context); glDepthMask(GL_TRUE); } cam.draw_gui(); fb.unbind(); //====================================================================== // Draw color map to window glViewport(x, y, width, height); fsquad.draw_texture(color_map); }; // make window function auto &window = app.create_window([&](Window &window) { std::tie(ww, wh) = window.get_size(); // TODO - what is this variable? splitscreen int fbw_new = splitscreen ? 
ww / 2 : ww; int fbh_new = wh; int fbw = color_map.get_width(); int fbh = color_map.get_height(); if (fbw_new != fbw || fbh_new != fbh) { realloc(fbw_new, fbh_new); } // if splitscreen, do half and half pill vilibility if (splitscreen) { // draw left half pills invisible set_pill_visibility(false); draw_scene(ww / 2, wh, 0, 0); // draw right half pills visible set_pill_visibility(true); draw_scene(ww / 2, wh, ww / 2, 0); } else { // if not split screen, call octoswarm.vis_update // TODO - what is vis_udpate? octoswarm.vis_update(); draw_scene(ww, wh, 0, 0); } }); // set window size and title window.set_size(ww, wh); window.set_title("VIPER Demo"); // set up input // TODO - keyboard / mouse? auto &input = window.get_input(); // TODO - camera position? Could be important // CHANGE c_entity.get<CameraComponent>().set_window(window); // ORIGINAL // c_entity.center = Vec3(0, 1, 0); // c_entity.get<TransformComponent>().position = Vec3(-12, 1, 0); // SINGLE BIG TENTACLE // c_entity.center = Vec3(-30, 1, -10); // c_entity.get<TransformComponent>().position = Vec3(-12, 1, 10); // WORKS FOR ZERO GRAV BIG TENTACLE // c_entity.center = Vec3(-50, 20, 40); // c_entity.get<TransformComponent>().position = Vec3(-20, -50, 20); // change this to move in opposite direction that you would think // increasing y moves further down c_entity.center = Vec3(-35, 10, 30); // making z negative moes closer c_entity.get<TransformComponent>().position = Vec3(-20, -50, 20); // TODO - what is this auto &bsphere_entity = scene.create_entity_with<WorldRenderComponent>(); auto &bsphere_renderer = bsphere_entity.set_renderer<SphereMeshRenderer>(); // get mouse position auto get_mouse_ray = [&](Vec3 &eye, Vec3 &dir) { // get position Vec2 pos = input.mouse_position; pos[1] = wh - pos[1]; int w = splitscreen ? ww / 2 : ww; pos = 2 * pos.cwiseQuotient(Vec2(w, wh)) - Vec2(1, 1); Vec4 cs(pos[0], pos[1], 0.1, 1); // TODO - what do? rotate? auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection(w, wh) * cam.get_view()).inverse(); Vec4 world = inv_mat * cs; Vec3 p = world.head<3>() / world[3]; eye = c_entity.get<TransformComponent>().position; dir = (p - eye).normalized(); }; // variables to keep track of runtime performance int framerate = 0; double frametime = 0; double sim_frametime = 0; float playback = 1.0; int it_count = 10; // TODO - change? bool hide_gui = false; bool simulating = true; bool single_step = false; bool bsphere_vis = false; std::vector<float> framerates(120); // defaults for settings and physics auto set_defaults = [&]() { // WINGATE // CHANGE THIS TO SHOW PILLS OR MESH // false - show mesh // true - show VIPER primitives // show_pills = false; show_pills = true; octoswarm.render_comp->visible = !show_pills; octoswarm.sphere_render_comp->visible = show_pills; it_count = 10; // sim_scene.gravity_strength = 1.0; sim_scene.gravity_strength = 0.1; playback = 1.0; }; set_defaults(); // Create GUI auto &canvas = scene.create_entity_with<GUICanvasComponent>(); canvas.set_action([&]() { if (hide_gui) return; ImGui::SetNextWindowSize(ImVec2(400, 500)); ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoSavedSettings); char fr_label[256]; sprintf(fr_label, "Framerate %i fps\n Total: %3.1f ms\n Sim: %3.1f ms", framerate, frametime, sim_frametime); ImGui::PlotLines(fr_label, &(framerates[0]), framerates.size(), 0, "", 0, 60); ImGui::Separator(); if (ImGui::Button("Reset")) { octoswarm.reset(); } ImGui::SameLine(0, 4); const char *bname = simulating ? 
"Pause" : "Resume"; if (ImGui::Button(bname)) { simulating = !simulating; } if (!simulating) { ImGui::SameLine(0, 4); if (ImGui::Button("Step")) { single_step = true; } } ImGui::Checkbox("Split Screen", &splitscreen); if (ImGui::Checkbox("Show Primitives", &show_pills)) { set_pill_visibility(show_pills); } ImGui::SliderFloat("Gravity", &sim_scene.gravity_strength, -1.0f, 3.0f); ImGui::SliderInt("Solver Iterations", &it_count, 0, 50); if (ImGui::Button("Set Defaults")) { set_defaults(); octoswarm.vis_update(); } ImGui::Separator(); const char *const scenes[] = {"Empty", "Pillars", "Cannonballs", "Explosion"}; if (ImGui::ListBox("Scenes", &octoswarm.scene_index, scenes, sizeof(scenes) / sizeof(scenes[0]))) { // TODO - resets octoswarm from beginning of simulation? could be important octoswarm.reset(); } ImGui::Separator(); ImGui::LabelText("Controls", "Look: Middle Mouse"); ImGui::Text("Recenter: Right Mouse"); ImGui::Text("Pan: Shift + Middle Mouse"); ImGui::Text("Grab: Left Mouse"); ImGui::Text("Shoot: Spacebar"); ImGui::Text("Toggle Primitives: F10"); ImGui::Text("Pause/Resume: F11"); ImGui::Text("Show/Hide Window: F12"); ImGui::End(); }); // place camera canvas.set_camera(c_entity.get<CameraComponent>()); int chambered_cow = 0; long frame = 0; long sim_frame = 0; // keep track of statistics double last_time = glfwGetTime(); double frame_avg = 0; double sim_frame_avg = 0; int held = 0; int selected = -1; // TODO - idk what this is bool swapped_pills = false; bool swapped_pause = false; bool swapped_window = false; bool recentered = false; // app event listener unsigned t = 0; unsigned delay = 60; app.add_listener<ApplicationUpdateEvent>( [&](const ApplicationUpdateEvent &) { // // Make muscle's contract octoswarm.update(t); t++; // if (i >= delay) { // unsigned j = i - delay + 3; // if (j < sim_scene.constraints.stretch.size()){ // if (!(j%3 == 0)){ // sim_scene.constraints.stretch[j].L *= .5; // } else { // sim_scene.constraints.stretch[j].L *= 1.2; // } // } // } // // move to origin /* if (i < 60) { int j = 51; Vec3 p = sim_scene.state.x[j]; // std::cout << p[0] << " " << p[1] << " " << p[2] << std::endl; sim_scene.state.x[j] *= 0.95; p = sim_scene.state.x[j]; // std::cout << p[0] << " " << p[1] << " " << p[2] << std::endl; // std::cout << sim_scene.state.x[0]*10 << " " << sim_scene.state.x[1]*10 << " " << sim_scene.state.x[2]*10 << std::endl; // // std::cout << sim_scene.state.xp[0]*10 << std::endl; // std::cout << std::endl; } */ /* sim_scene.state.xa[k] = 0; sim_scene.state.x[k] = sim_scene.state.x[0] / 100000; sim_scene.state.xp[k] = sim_scene.state.xp[0] / 100000; sim_scene.state.xa[k] = 1; */ SphereMesh temp_smesh; auto vs_temp = temp_smesh.add_vertex(viper::CollisionGrid::b_sphere); temp_smesh.add_sphere(vs_temp); bsphere_entity.visible = bsphere_vis; bsphere_renderer.upload_mesh(temp_smesh); // if mouseclick? if (input.get_mouse(0)) { Vec3 eye, dir; get_mouse_ray(eye, dir); // if just selected if (selected == -1) { // TODO - if click on octopus, move it around? selected = octoswarm.intersect(eye, dir); // set velocity? 
to 0 sim_scene.state.xa[selected] = 0; // if octopus already selected } else { Vec3 p = sim_scene.state.x[selected]; Vec3 x = p - eye; Vec3 new_pos = p - (x - dir * dir.dot(x)); new_pos[1] = ::max(new_pos[1], sim_scene.state.r[selected]); sim_scene.state.x[selected] = new_pos; sim_scene.state.xp[selected] = sim_scene.state.x[selected]; } // unselect } else if (selected != -1) { sim_scene.state.xa[selected] = 1; selected = -1; } // TODO - idk what the get_mouse 1 is // move something around? if (input.get_mouse(1)) { Image<float> depth_im; depth_map.download(depth_im); int mxi = int(input.mouse_position[0]); int myi = int(wh - input.mouse_position[1]); auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection() * cam.get_view()).inverse(); if (!recentered && !(mxi < 0 || mxi >= ww || myi < 0 || myi >= wh)) { Vec3 uvdepth; uvdepth.head<2>() = Vec2(float(mxi) / ww, float(myi) / wh); uvdepth[2] = min(depth_im(myi, mxi), 0.999); Vec4 dev(0, 0, 0, 1); dev.head<3>() = 2 * uvdepth - Vec3::Ones(); Vec4 world_h = inv_mat * dev; Vec3 new_center = world_h.head<3>() / world_h[3]; Vec3 dc = new_center - c_entity.center; c_entity.center += dc; c_entity.get<TransformComponent>().position += dc; recentered = true; } } else { recentered = false; } // F10 changes pill visibility if (input.get_key(GLFW_KEY_F10)) { if (!swapped_pills) { set_pill_visibility(!show_pills); swapped_pills = true; } } else { swapped_pills = false; } // F11 pause/unpause if (input.get_key(GLFW_KEY_F11)) { if (!swapped_pause) { simulating = !simulating; swapped_pause = true; } } else { swapped_pause = false; } // F12 hides GUI if (input.get_key(GLFW_KEY_F12)) { if (!swapped_window) { hide_gui = !hide_gui; swapped_window = true; } } else { swapped_window = false; } // Space spits out new octopus if (input.get_key(GLFW_KEY_SPACE)) { // every 5 updates if ((held % 5) == 0) { // TODO - important, makes new octopus Vec3 p = c_entity.get<TransformComponent>().position; Vec3 v = c_entity.get<TransformComponent>().forward(); octoswarm.set_position(chambered_cow, p + 3 * v, v); chambered_cow = (chambered_cow + 1) % octoswarm.n_cows; } held++; } else { held = 0; } // keep track of frame statistics double frame_time = 0.0; double this_time = last_time; while (frame_time < 0.016667) { this_time = glfwGetTime(); frame_time = this_time - last_time; std::this_thread::yield(); } last_time = this_time; framerates.erase(framerates.begin()); framerates.push_back(1.0 / frame_time); frame_avg += frame_time; if ((frame % 10) == 0) { frametime = 1000 * frame_avg / 10.0; framerate = 0.5 + 10.0 / frame_avg; frame_avg = 0; } // if simulating or supposed to take one step if (simulating || single_step) { // simulate one step forward // TODO - important double sim_time = sim_scene.step(playback / 60.f, it_count, true); sim_frame_avg += sim_time; if ((sim_frame % 10) == 0) { sim_frametime = sim_frame_avg / 10.0; sim_frame_avg = 0; } single_step = false; sim_frame++; } scene.update(); frame++; }); app.run(); return 0; }
ef0bc1b4161b2589c5435be3abbd55e4f2afa304.cu
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // standard imports #include <cfloat> #include <fstream> #include <iostream> #include <thread> #define TINYOBJLOADER_IMPLEMENTATION #define USE_PNG // include OpenGP and OpenGL #include <OpenGP/GL/Application.h> #include <OpenGP/GL/Components/GUICanvasComponent.h> #include <OpenGP/GL/ImguiRenderer.h> #include <OpenGP/Image/Image.h> // include Octopus // CHANGE #include "OctopusComponent.h" // TODO - do I need to modify Scene.h? #include "Scene.h" #include "CollisionGrid.cuh" #define OPENGP_IMPLEMENT_ALL_IN_THIS_FILE #include <OpenGP/util/implementations.h> // namespace OpenGP using namespace OpenGP; // main int main(int argc, char **argv) { // constant shadow size int shadow_size = 2048; // make OpenGP Application Application app; // TODO - From Scene.h? Or OpenGP? Scene scene; // make light auto &light_entity = scene.create_entity_with<CameraComponent>(); light_entity.get<TransformComponent>().set_forward( Vec3(-1, -2, 0).normalized()); light_entity.get<TransformComponent>().position = Vec3(50, 100, 0); Mat4x4 shadow_matrix = (light_entity.get_projection(shadow_size, shadow_size) * light_entity.get_view()); // make floor auto &floor_entity = scene.create_entity_with<WorldRenderComponent>(); auto &floor_renderer = floor_entity.set_renderer<SurfaceMeshRenderer>(); floor_renderer.get_gpu_mesh().set_vpoint( {Vec3(-10000, 0, -10000), Vec3(10000, 0, -10000), Vec3(-10000, 0, 10000), Vec3(10000, 0, 10000)}); floor_renderer.get_gpu_mesh().set_vnormal( {Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0)}); floor_renderer.get_gpu_mesh().set_triangles({0, 1, 2, 1, 2, 3}); // define floormat material Material floormat(R"GLSL( uniform sampler2D shadow_map; uniform vec3 light_pos; uniform mat4 shadow_matrix; uniform float shadow_near; uniform float shadow_far; vec3 world2uvdepth(vec3 pos, mat4 mat) { vec4 a = mat * vec4(pos, 1); vec3 b = a.xyz / a.w; return (b + vec3(1)) / 2; } float get_shadow_mask(vec2 uv) { return 1 - smoothstep(0.3, 0.5, length(uv - vec2(0.5, 0.5))); } vec3 get_ambient(vec3 pos) { vec3 ambient = vec3(0.14, 0.14, 0.18); vec3 uvd = world2uvdepth(pos, shadow_matrix); return ambient + vec3(0.2) * get_shadow_mask(uvd.xy); } float linear_shadow_depth(float d) { return shadow_near * shadow_far / (shadow_far + d * (shadow_near - shadow_far)); } float get_shadow(vec3 pos) { ivec2 dim = textureSize(shadow_map, 0); vec3 uvd = world2uvdepth(pos, shadow_matrix); vec2 base_coord = uvd.xy * dim; ivec2 base_coord_i = ivec2(floor(base_coord)); vec2 inter = fract(base_coord); mat4 shadow_depths; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { shadow_depths[i][j] = linear_shadow_depth(texelFetch(shadow_map, base_coord_i + ivec2(i-1, j-1), 0).r); } } float threshold = linear_shadow_depth(uvd.z) - 0.1; mat2 pcf_vals = mat2(0); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { for (int x = 0; x < 3; ++x) { for (int y = 0; y < 3; ++y) { pcf_vals[i][j] += (shadow_depths[x + i][y + j] < threshold) ? 
0 : (1.0 / 9.0); } } } } float a = mix(pcf_vals[0][0], pcf_vals[1][0], inter.x); float b = mix(pcf_vals[0][1], pcf_vals[1][1], inter.x); return mix(a, b, inter.y) * get_shadow_mask(uvd.xy); } vec4 fragment_shade() { vec3 pos = get_position(); vec3 lightdir = normalize(light_pos - pos); vec3 white_color = vec3(1, 1, 1); vec3 black_color = vec3(0.6, 0.6, 0.6); vec3 background = (white_color + black_color) / 2; vec3 diffuse_color = white_color; vec3 modpos = mod(pos / 5, 1); if ((modpos.x < 0.5) ^^ (modpos.z < 0.5)) { diffuse_color = black_color; } float blur = exp(-2 * max(length(dFdx(pos)), length(dFdy(pos)))); blur = clamp(2 * blur, 0, 1); diffuse_color = mix(background, diffuse_color, blur); vec3 ambient = get_ambient(pos); float shadow = get_shadow(pos); vec3 out_color = shadow * 0.85 * clamp(dot(get_normal(), normalize(lightdir)), 0, 1) * diffuse_color; out_color += ambient * diffuse_color; return vec4(out_color, 1); } )GLSL"); floormat.set_property("ao_map", 6); floormat.set_property("shadow_map", 7); floormat.set_property("shadow_matrix", shadow_matrix); floormat.set_property("light_pos", light_entity.get<TransformComponent>().position); floormat.set_property("shadow_near", light_entity.near_plane); floormat.set_property("shadow_far", light_entity.far_plane); floor_renderer.set_material(floormat); floor_renderer.rebuild(); // make viper scene viper::Scene sim_scene; // Set OctopusComponent property to current scene OctopusComponent::v_scene = &sim_scene; // TODO - what is octoswarm?? // CHANGE auto &octoswarm = scene.create_entity_with<OctopusComponent>(); octoswarm.renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.renderer->get_material().set_property("shadow_near", light_entity.near_plane); octoswarm.renderer->get_material().set_property("shadow_far", light_entity.far_plane); octoswarm.sphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.sphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.sphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.sphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.tsphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.tsphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.tsphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.tsphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.cannonball_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.cannonball_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.cannonball_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.cannonball_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.pillar_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.pillar_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.pillar_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.pillar_renderer->get_material().set_property( "shadow_far", 
light_entity.far_plane); auto &c_entity = scene.create_entity_with<TrackballComponent>(); c_entity.oriented = true; // constants - window? int ww = 3840, wh = 1080; // make framebuffer Framebuffer fb, fb_shadow; RGB8Texture color_map, color_map_shadow; D32FTexture depth_map, depth_map_shadow; auto realloc = [&](int w, int h) { color_map.allocate(w, h); depth_map.allocate(w, h); }; realloc(ww, wh); depth_map_shadow.allocate(shadow_size, shadow_size); color_map_shadow.allocate(shadow_size, shadow_size); fb.attach_color_texture(color_map); fb.attach_depth_texture(depth_map); fb_shadow.attach_color_texture(color_map_shadow); fb_shadow.attach_depth_texture(depth_map_shadow); RGB8Texture colmap; Image<Eigen::Matrix<uint8_t, 3, 1>> colmap_cpu(2048, 2048); // Read in texture binary // WINGATE // TODO - change this to read in Tentacle_diffuse.png or Tentacle_convert.mtl std::ifstream("texture.bin", std::ios::binary).read( reinterpret_cast<char*>(&colmap_cpu(0, 0)), 12582912); colmap.upload(colmap_cpu); FullscreenQuad fsquad; // TODO - change defaults? Does this change the view? // No, change where says set_defaults bool show_pills = false; bool splitscreen = false; // bool splitscreen = true; // function to set pill visibility of octopi auto set_pill_visibility = [&](bool visible) { show_pills = visible; octoswarm.render_comp->visible = !visible; octoswarm.sphere_render_comp->visible = visible; octoswarm.vis_update(); }; // function to draw scene auto draw_scene = [&](int width, int height, int x, int y) { //====================================================================== // Draw shadow map fb_shadow.bind(); light_entity.draw(shadow_size, shadow_size); fb_shadow.unbind(); //====================================================================== // Draw scene with shadows fb.bind(); glActiveTexture(GL_TEXTURE5); colmap.bind(); glActiveTexture(GL_TEXTURE7); depth_map_shadow.bind(); glActiveTexture(GL_TEXTURE0); // set up camera auto &cam = c_entity.get<CameraComponent>(); cam.draw(color_map.get_width(), color_map.get_height(), 0, 0, false); // if sphere component is visible if (octoswarm.sphere_render_comp->visible) { RenderContext context; glDepthMask(GL_FALSE); context.aspect = (float)color_map.get_width() / (float)color_map.get_height(); context.vfov = cam.vfov; context.near = cam.near_plane; context.far = cam.far_plane; context.eye = cam.get<TransformComponent>().position; context.forward = cam.get<TransformComponent>().forward(); context.up = cam.get<TransformComponent>().up(); context.update_view(); context.update_projection(); auto &renderable = *octoswarm.tsphere_render_comp; auto &transform = renderable.get<TransformComponent>(); context.translation = transform.position; context.scale = transform.scale; context.rotation = transform.rotation; context.update_model(); glEnable(GL_DEPTH_TEST); renderable.get_renderer().render(context); glDepthMask(GL_TRUE); } cam.draw_gui(); fb.unbind(); //====================================================================== // Draw color map to window glViewport(x, y, width, height); fsquad.draw_texture(color_map); }; // make window function auto &window = app.create_window([&](Window &window) { std::tie(ww, wh) = window.get_size(); // TODO - what is this variable? splitscreen int fbw_new = splitscreen ? 
ww / 2 : ww; int fbh_new = wh; int fbw = color_map.get_width(); int fbh = color_map.get_height(); if (fbw_new != fbw || fbh_new != fbh) { realloc(fbw_new, fbh_new); } // if splitscreen, do half and half pill vilibility if (splitscreen) { // draw left half pills invisible set_pill_visibility(false); draw_scene(ww / 2, wh, 0, 0); // draw right half pills visible set_pill_visibility(true); draw_scene(ww / 2, wh, ww / 2, 0); } else { // if not split screen, call octoswarm.vis_update // TODO - what is vis_udpate? octoswarm.vis_update(); draw_scene(ww, wh, 0, 0); } }); // set window size and title window.set_size(ww, wh); window.set_title("VIPER Demo"); // set up input // TODO - keyboard / mouse? auto &input = window.get_input(); // TODO - camera position? Could be important // CHANGE c_entity.get<CameraComponent>().set_window(window); // ORIGINAL // c_entity.center = Vec3(0, 1, 0); // c_entity.get<TransformComponent>().position = Vec3(-12, 1, 0); // SINGLE BIG TENTACLE // c_entity.center = Vec3(-30, 1, -10); // c_entity.get<TransformComponent>().position = Vec3(-12, 1, 10); // WORKS FOR ZERO GRAV BIG TENTACLE // c_entity.center = Vec3(-50, 20, 40); // c_entity.get<TransformComponent>().position = Vec3(-20, -50, 20); // change this to move in opposite direction that you would think // increasing y moves further down c_entity.center = Vec3(-35, 10, 30); // making z negative moes closer c_entity.get<TransformComponent>().position = Vec3(-20, -50, 20); // TODO - what is this auto &bsphere_entity = scene.create_entity_with<WorldRenderComponent>(); auto &bsphere_renderer = bsphere_entity.set_renderer<SphereMeshRenderer>(); // get mouse position auto get_mouse_ray = [&](Vec3 &eye, Vec3 &dir) { // get position Vec2 pos = input.mouse_position; pos[1] = wh - pos[1]; int w = splitscreen ? ww / 2 : ww; pos = 2 * pos.cwiseQuotient(Vec2(w, wh)) - Vec2(1, 1); Vec4 cs(pos[0], pos[1], 0.1, 1); // TODO - what do? rotate? auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection(w, wh) * cam.get_view()).inverse(); Vec4 world = inv_mat * cs; Vec3 p = world.head<3>() / world[3]; eye = c_entity.get<TransformComponent>().position; dir = (p - eye).normalized(); }; // variables to keep track of runtime performance int framerate = 0; double frametime = 0; double sim_frametime = 0; float playback = 1.0; int it_count = 10; // TODO - change? bool hide_gui = false; bool simulating = true; bool single_step = false; bool bsphere_vis = false; std::vector<float> framerates(120); // defaults for settings and physics auto set_defaults = [&]() { // WINGATE // CHANGE THIS TO SHOW PILLS OR MESH // false - show mesh // true - show VIPER primitives // show_pills = false; show_pills = true; octoswarm.render_comp->visible = !show_pills; octoswarm.sphere_render_comp->visible = show_pills; it_count = 10; // sim_scene.gravity_strength = 1.0; sim_scene.gravity_strength = 0.1; playback = 1.0; }; set_defaults(); // Create GUI auto &canvas = scene.create_entity_with<GUICanvasComponent>(); canvas.set_action([&]() { if (hide_gui) return; ImGui::SetNextWindowSize(ImVec2(400, 500)); ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoSavedSettings); char fr_label[256]; sprintf(fr_label, "Framerate %i fps\n Total: %3.1f ms\n Sim: %3.1f ms", framerate, frametime, sim_frametime); ImGui::PlotLines(fr_label, &(framerates[0]), framerates.size(), 0, "", 0, 60); ImGui::Separator(); if (ImGui::Button("Reset")) { octoswarm.reset(); } ImGui::SameLine(0, 4); const char *bname = simulating ? 
"Pause" : "Resume"; if (ImGui::Button(bname)) { simulating = !simulating; } if (!simulating) { ImGui::SameLine(0, 4); if (ImGui::Button("Step")) { single_step = true; } } ImGui::Checkbox("Split Screen", &splitscreen); if (ImGui::Checkbox("Show Primitives", &show_pills)) { set_pill_visibility(show_pills); } ImGui::SliderFloat("Gravity", &sim_scene.gravity_strength, -1.0f, 3.0f); ImGui::SliderInt("Solver Iterations", &it_count, 0, 50); if (ImGui::Button("Set Defaults")) { set_defaults(); octoswarm.vis_update(); } ImGui::Separator(); const char *const scenes[] = {"Empty", "Pillars", "Cannonballs", "Explosion"}; if (ImGui::ListBox("Scenes", &octoswarm.scene_index, scenes, sizeof(scenes) / sizeof(scenes[0]))) { // TODO - resets octoswarm from beginning of simulation? could be important octoswarm.reset(); } ImGui::Separator(); ImGui::LabelText("Controls", "Look: Middle Mouse"); ImGui::Text("Recenter: Right Mouse"); ImGui::Text("Pan: Shift + Middle Mouse"); ImGui::Text("Grab: Left Mouse"); ImGui::Text("Shoot: Spacebar"); ImGui::Text("Toggle Primitives: F10"); ImGui::Text("Pause/Resume: F11"); ImGui::Text("Show/Hide Window: F12"); ImGui::End(); }); // place camera canvas.set_camera(c_entity.get<CameraComponent>()); int chambered_cow = 0; long frame = 0; long sim_frame = 0; // keep track of statistics double last_time = glfwGetTime(); double frame_avg = 0; double sim_frame_avg = 0; int held = 0; int selected = -1; // TODO - idk what this is bool swapped_pills = false; bool swapped_pause = false; bool swapped_window = false; bool recentered = false; // app event listener unsigned t = 0; unsigned delay = 60; app.add_listener<ApplicationUpdateEvent>( [&](const ApplicationUpdateEvent &) { // // Make muscle's contract octoswarm.update(t); t++; // if (i >= delay) { // unsigned j = i - delay + 3; // if (j < sim_scene.constraints.stretch.size()){ // if (!(j%3 == 0)){ // sim_scene.constraints.stretch[j].L *= .5; // } else { // sim_scene.constraints.stretch[j].L *= 1.2; // } // } // } // // move to origin /* if (i < 60) { int j = 51; Vec3 p = sim_scene.state.x[j]; // std::cout << p[0] << " " << p[1] << " " << p[2] << std::endl; sim_scene.state.x[j] *= 0.95; p = sim_scene.state.x[j]; // std::cout << p[0] << " " << p[1] << " " << p[2] << std::endl; // std::cout << sim_scene.state.x[0]*10 << " " << sim_scene.state.x[1]*10 << " " << sim_scene.state.x[2]*10 << std::endl; // // std::cout << sim_scene.state.xp[0]*10 << std::endl; // std::cout << std::endl; } */ /* sim_scene.state.xa[k] = 0; sim_scene.state.x[k] = sim_scene.state.x[0] / 100000; sim_scene.state.xp[k] = sim_scene.state.xp[0] / 100000; sim_scene.state.xa[k] = 1; */ SphereMesh temp_smesh; auto vs_temp = temp_smesh.add_vertex(viper::CollisionGrid::b_sphere); temp_smesh.add_sphere(vs_temp); bsphere_entity.visible = bsphere_vis; bsphere_renderer.upload_mesh(temp_smesh); // if mouseclick? if (input.get_mouse(0)) { Vec3 eye, dir; get_mouse_ray(eye, dir); // if just selected if (selected == -1) { // TODO - if click on octopus, move it around? selected = octoswarm.intersect(eye, dir); // set velocity? 
to 0 sim_scene.state.xa[selected] = 0; // if octopus already selected } else { Vec3 p = sim_scene.state.x[selected]; Vec3 x = p - eye; Vec3 new_pos = p - (x - dir * dir.dot(x)); new_pos[1] = std::max(new_pos[1], sim_scene.state.r[selected]); sim_scene.state.x[selected] = new_pos; sim_scene.state.xp[selected] = sim_scene.state.x[selected]; } // unselect } else if (selected != -1) { sim_scene.state.xa[selected] = 1; selected = -1; } // TODO - idk what the get_mouse 1 is // move something around? if (input.get_mouse(1)) { Image<float> depth_im; depth_map.download(depth_im); int mxi = int(input.mouse_position[0]); int myi = int(wh - input.mouse_position[1]); auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection() * cam.get_view()).inverse(); if (!recentered && !(mxi < 0 || mxi >= ww || myi < 0 || myi >= wh)) { Vec3 uvdepth; uvdepth.head<2>() = Vec2(float(mxi) / ww, float(myi) / wh); uvdepth[2] = min(depth_im(myi, mxi), 0.999); Vec4 dev(0, 0, 0, 1); dev.head<3>() = 2 * uvdepth - Vec3::Ones(); Vec4 world_h = inv_mat * dev; Vec3 new_center = world_h.head<3>() / world_h[3]; Vec3 dc = new_center - c_entity.center; c_entity.center += dc; c_entity.get<TransformComponent>().position += dc; recentered = true; } } else { recentered = false; } // F10 changes pill visibility if (input.get_key(GLFW_KEY_F10)) { if (!swapped_pills) { set_pill_visibility(!show_pills); swapped_pills = true; } } else { swapped_pills = false; } // F11 pause/unpause if (input.get_key(GLFW_KEY_F11)) { if (!swapped_pause) { simulating = !simulating; swapped_pause = true; } } else { swapped_pause = false; } // F12 hides GUI if (input.get_key(GLFW_KEY_F12)) { if (!swapped_window) { hide_gui = !hide_gui; swapped_window = true; } } else { swapped_window = false; } // Space spits out new octopus if (input.get_key(GLFW_KEY_SPACE)) { // every 5 updates if ((held % 5) == 0) { // TODO - important, makes new octopus Vec3 p = c_entity.get<TransformComponent>().position; Vec3 v = c_entity.get<TransformComponent>().forward(); octoswarm.set_position(chambered_cow, p + 3 * v, v); chambered_cow = (chambered_cow + 1) % octoswarm.n_cows; } held++; } else { held = 0; } // keep track of frame statistics double frame_time = 0.0; double this_time = last_time; while (frame_time < 0.016667) { this_time = glfwGetTime(); frame_time = this_time - last_time; std::this_thread::yield(); } last_time = this_time; framerates.erase(framerates.begin()); framerates.push_back(1.0 / frame_time); frame_avg += frame_time; if ((frame % 10) == 0) { frametime = 1000 * frame_avg / 10.0; framerate = 0.5 + 10.0 / frame_avg; frame_avg = 0; } // if simulating or supposed to take one step if (simulating || single_step) { // simulate one step forward // TODO - important double sim_time = sim_scene.step(playback / 60.f, it_count, true); sim_frame_avg += sim_time; if ((sim_frame % 10) == 0) { sim_frametime = sim_frame_avg / 10.0; sim_frame_avg = 0; } single_step = false; sim_frame++; } scene.update(); frame++; }); app.run(); return 0; }
8d51105abb107ef78cc48ab2ace6cd08684d6de0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Gaussian Blur Computation * * Maximo, Andre -- Sep, 2011 * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <cfloat> #include <hipfft.h> #include <util/util.h> #include <util/symbol.h> //#include <util/chronos.h> #include <util/dvector.h> #include <util/timer.h> #include <util/recfilter.h> //#include "pgm.h" #include "gauss_dir_cpu.h" using namespace gpufilter; #define WS 32 ///< Warp Size #define NW 8 ///< Number of warps per block (with all warps computing) #define NB 8 ///< Number of blocks per SM #define WS1 32 ///< for gauss1 #define NW1 8 ///< for gauss1 #define NB1 8 ///< for gauss1 #define WS2 16 ///< for gauss2 #define NW2 8 ///< for gauss2 #define NB2 16 ///< for gauss2 #define WS3 32 ///< for gauss3 #define NW3 16 ///< for gauss3 #define NB3 3 ///< for gauss3 #define MNB 2 ///< Maximum number of blocks per SM #define MNT 1024 ///< Maximum number of threads per block (with MNB) unsigned RUN_TIMES=1024; int res_out; #define DEFAULT_RES_OUT 1 // res_out: 0 verbose; 1 perf; 2 error #define KERNEL_RADIUS 33 //#define KERNEL_RADIUS 47 //#define COMPUTE_IN_CPU // compute in cpu debug flag: cpu can be very slow.. //#define RUN_GAUSS_0 //#define RUN_GAUSS_1 //#define RUN_GAUSS_2 //#define RUN_GAUSS_3 //#define RUN_GAUSS_FFT //#define RUN_GAUSS_REC // get cactus with Diego Nehab //#define USE_CACTUS // use cactus debug flag: read/write file names below #define CACTUS_IN "cactus-1024.pgm" #define CACTUS_GPU "cactus-1024-direct-gpu.pgm" #define CACTUS_CPU "cactus-1024-direct-cpu.pgm" #define CACTUS_FFT_GPU "cactus-1024-fft-gpu.pgm" #define CACTUS_RECURSIVE_GPU "cactus-1024-recursive-gpu.pgm" /// Naming conventions are: c_ constant; t_ texture; g_ global memory; /// s_ shared memory; d_ device pointer; a_ cuda-array; p_ template /// parameter; f_ surface. 
__constant__ int c_width, c_height; __constant__ Vector< float, KERNEL_RADIUS * 2 + 1 > c_kernel; __constant__ int c_fft_width; texture< float, 2, hipReadModeElementType > t_in_img; surface< void, 2 > f_in_img; surface< void, 2 > f_out_img; /// Memory types are: TMEM texture memory; SMEM shared memory; GMEM /// global memory ///------------------------- AUXILIARY FUNCTIONS ---------------------------- __device__ inline // auxiliary reflect function in device int d_reflect( const int& i, const int& n ) { if( i < 0 ) return (-i-1); else if( i >= n ) return (2*n-i-1); else return i; } __device__ inline // auxiliary clamp function in device int d_clamp( const int& i, const int& n ) { if( i < 0 ) return 0; else if( i >= n ) return n-1; else return i; } ///----------------- GAUSS-0 - TWO-PASS CONVOLUTION IN GMEM ----------------- template< int p_radius > __global__ __launch_bounds__( WS*NW, NB ) void gauss0_rows( float *g_out, const float *g_in ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty; if( row >= c_height or col >= c_width ) return; g_in += row*c_width; g_out += row*c_width + col; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += g_in[ d_clamp(col+k, c_width) ] * c_kernel[ k + p_radius ]; } *g_out = s; } template< int p_radius > __global__ __launch_bounds__( WS*NW, NB ) void gauss0_cols( float *g_out, const float *g_in ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty; if( row >= c_height or col >= c_width ) return; g_in += col; g_out += row*c_width + col; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += g_in[ d_clamp(row+k, c_height)*c_width ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-1 - TWO-PASS CONVOLUTION IN TMEM ----------------- template< int p_radius > __global__ __launch_bounds__( WS1*NW1, NB1 ) void gauss1_rows( void ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty; if( row >= c_height or col >= c_width ) return; float tu = col + .5f, tv = row + .5f, s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += tex2D( t_in_img, tu+k, tv ) * c_kernel[ k + p_radius ]; } surf2Dwrite( s, f_out_img, col*4, row, hipBoundaryModeTrap ); // trap kills kernel if outside boundary } template< int p_radius > __global__ __launch_bounds__( WS1*NW1, NB1 ) void gauss1_cols( float *g_out ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty; if( row >= c_height or col >= c_width ) return; g_out += row*c_width + col; float tu = col + .5f, tv = row + .5f; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += tex2D( t_in_img, tu, tv+k ) * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-2 - TWO-PASS CONVOLUTION IN SMEM ----------------- template< int p_radius > __global__ __launch_bounds__( MNT, MNB ) void gauss2_rows( void ) { const int tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y; float tu = bx*MNT+tx + .5f, tv = by + .5f; float s = 0.f; volatile __shared__ float s_row[ MNT + p_radius*2 ]; s_row[ p_radius + tx ] = tex2D( t_in_img, tu, tv ); if( tx < p_radius ) s_row[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); else if( tx < 2*p_radius ) s_row[ MNT + tx ] = tex2D( t_in_img, tu - p_radius + MNT, tv ); __syncthreads(); if( bx*MNT+tx >= c_width ) return; #pragma unroll for (int k = 
-p_radius; k <= p_radius; ++k) { s += s_row[ p_radius + tx + k ] * c_kernel[ k + p_radius ]; } surf2Dwrite( s, f_out_img, (bx*MNT+tx)*4, by, hipBoundaryModeTrap ); } template< int p_radius > __global__ __launch_bounds__( WS2*NW2, NB2 ) void gauss2_cols( float *g_out ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y; float tu = bx*WS2+tx + .5f, tv = by*NW2+ty + .5f; float s = 0.f; volatile __shared__ float s_cols[ WS2 ][ NW2 + p_radius*2 + 1 ]; s_cols[ tx ][ p_radius + ty ] = tex2D( t_in_img, tu, tv ); if( p_radius <= NW2/2 ) { if( ty < p_radius ) s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius ); else if( ty < 2*p_radius ) s_cols[ tx ][ NW2 + ty ] = tex2D( t_in_img, tu, tv - p_radius + NW2 ); } else if( p_radius <= NW2 ) { if( ty < p_radius ) { s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius ); s_cols[ tx ][ p_radius + NW2 + ty ] = tex2D( t_in_img, tu, tv + NW2 ); } } else { for (int i = 0; i < (p_radius+NW2-1)/NW2; ++i) { int wy = i*NW2+ty; if( wy < p_radius ) { s_cols[ tx ][ wy ] = tex2D( t_in_img, tu, tv - p_radius + i*NW2 ); s_cols[ tx ][ p_radius + NW2 + wy ] = tex2D( t_in_img, tu, tv + NW2 + i*NW2 ); } } } __syncthreads(); if( bx*WS2+tx >= c_width or by*NW2+ty >= c_height ) return; g_out += (by*NW2+ty)*c_width + bx*WS2+tx; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += s_cols[tx][ p_radius + ty + k ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-3 - ONE-PASS CONVOLUTION ----------------- template< int p_radius > __device__ void load_convolve_rows( volatile float *s_in, const int& tx, const float& tu, const float& tv ) { // load middle data s_in[ p_radius + tx ] = tex2D( t_in_img, tu, tv ); // load left and right data if( p_radius <= WS3/2 ) { if( tx < p_radius ) s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); else if( tx < p_radius*2 ) s_in[ WS3 + tx ] = tex2D( t_in_img, tu - p_radius + WS3, tv ); } else if( p_radius <= WS3 ) { if( tx < p_radius ) { s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); s_in[ p_radius + WS3 + tx ] = tex2D( t_in_img, tu + WS3, tv ); } } else { for (int i = 0; i < (p_radius+WS3-1)/WS3; ++i) { int wx = i*WS3+tx; if( wx < p_radius ) { s_in[ wx ] = tex2D( t_in_img, tu - p_radius + i*WS3, tv ); s_in[ p_radius + WS3 + wx ] = tex2D( t_in_img, tu + WS3 + i*WS3, tv ); } } } // convolve row float s = 0.f; for (int k = -p_radius; k <= p_radius; ++k) { s += s_in[ p_radius + tx + k ] * c_kernel[ k + p_radius ]; } s_in[ p_radius + tx ] = s; } template< int p_radius > __global__ __launch_bounds__( WS3*NW3, NB3 ) void gauss3( float *g_out ) { int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS3+tx, row = by*NW3+ty; if( row >= c_height or col >= c_width ) bx = -1; float tu = col + .5f, tv = row + .5f; volatile __shared__ float s_inblock[ NW3 + p_radius*2 ][ WS3 + p_radius*2 ]; // load middle data load_convolve_rows< p_radius >( &s_inblock[ p_radius + ty ][0], tx, tu, tv ); // load upper and lower data if( p_radius <= NW3/2 ) { if( ty < p_radius ) load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius ); else if( ty < p_radius*2 ) load_convolve_rows< p_radius >( &s_inblock[ NW3 + ty ][0], tx, tu, tv - p_radius + NW3 ); } else if( p_radius <= NW3 ) { if( ty < p_radius ) { load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius ); load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + ty ][0], tx, tu, tv + NW3 ); } } else { for (int i = 0; i < (p_radius+NW3-1)/NW3; ++i) { int wy = i*NW3+ty; if( wy < 
p_radius ) { load_convolve_rows< p_radius >( &s_inblock[ wy ][0], tx, tu, tv - p_radius + i*NW3 ); load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + wy ][0], tx, tu, tv + NW3 + i*NW3 ); } } } __syncthreads(); if( bx == -1 ) return; g_out += row*c_width + col; // convolve cols float s = 0.f; for (int k = -p_radius; k <= p_radius; ++k) { s += s_inblock[ p_radius + ty + k ][ p_radius + tx ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-FFT - CONVOLUTION THROUGH FFT ----------------- __global__ __launch_bounds__( WS*NW, NB ) void apply_gauss_hat_kernel( float2 *img, float cte ) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; if( x >= c_fft_width or y >= c_height ) return; float wx = float(x)/c_fft_width/2.f, wy; if( y < c_height/2 ) wy = float(y)/c_height; else wy = float(y-c_height)/c_height; float g = exp( -cte * (wx*wx + wy*wy)/2.f ); float2 val = img[y*c_fft_width+x]; val.x *= g; val.y *= g; img[y*c_fft_width+x] = val; } template< int radius > void apply_gauss_hat( float2 *img_spectrum, const dim3& dim_grid, const dim3& dim_block ) { float sigma = (float)radius/2; float cte = sigma*2*M_PI; cte *= cte; hipLaunchKernelGGL(( apply_gauss_hat_kernel), dim3(dim_grid), dim3(dim_block) , 0, 0, img_spectrum, cte ); } ///---------------- MAIN ---------------- int main( int argc, char** argv ) { int width = 4096, height = 2048; if( argc > 2 && ( sscanf( argv[1], "%d", &width ) != 1 || sscanf( argv[2], "%d", &height ) != 1 || width < 1 || height < 1 ) ) { fprintf( stderr, "[error] Invalid argument: %s %s\n[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[1], argv[2], argv[0], width, height ); return 1; } else if( argc == 2 || argc > 4 ) { fprintf( stderr, "[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[0], width, height ); return 1; } res_out = ( argc == 4 ) ? res_out = argv[3][0]-48 : DEFAULT_RES_OUT; if( res_out != 0 && res_out != 1 && res_out != 2 ) { fprintf( stderr, "[error] Invalid output type %d\n\tValids are: 0 - no output; 1 - performance; 2 - error\n", res_out ); return 1; } int ne = width * height; // number of elements float dt = 0.f; std::vector< float > kernel; int radius = KERNEL_RADIUS; compute_gauss( kernel, radius ); std::vector< float > h_in_img; #ifdef USE_CACTUS int maxval; if( !res_out ) { printf( "[gauss] Reading %s ... ", CACTUS_IN ); fflush(stdin); if( load_pgm( h_in_img, width, height, maxval, CACTUS_IN ) ) printf( "done! (image: %d x %d)\n", width, height ); else { fprintf( stderr, "!!! error!" ); return 1; } ne = width * height; } else { h_in_img.resize( ne ); maxval = 255; srand( 1234 ); for (int i = 0; i < ne; ++i) h_in_img[i] = rand() / (double)RAND_MAX; } #else if( !res_out ) { printf( "[gauss] Generating random image (%dx%d) ... ", width, height ); fflush(stdin); } h_in_img.resize( ne ); srand( 1234 ); for (int i = 0; i < ne; ++i) h_in_img[i] = rand() / (double)RAND_MAX; if( !res_out ) printf( "done!\n" ); #endif if( !res_out ) { printf( "[gauss] Allocating memory in CPU ... " ); fflush(stdin); } std::vector< float > h_ref_img( ne ), h_out_img( ne ), h_fft_img( ne ); if( !res_out ) printf( "done!\n"); #ifdef COMPUTE_IN_CPU if( !res_out ) { printf( "[gauss] Computing in CPU ... 
" ); fflush(stdin); } Chronos te; // time elapsed computation te.reset(); gauss_cpu( &h_ref_img[0], &h_in_img[0], width, height, kernel, radius ); dt = te.elapsed(); if( !res_out ) printf( "done!\n[CPU] reference done in %gs @ %g MiP/s\n", dt, ne/(1024.*1024.*dt) ); #endif #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_CPU ); fflush(stdin); if( save_pgm( h_ref_img, width, height, maxval, CACTUS_CPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif if( !res_out ) { printf( "[gauss] Allocating memory in GPU ... " ); fflush(stdin); } hipChannelFormatDesc ccd = hipCreateChannelDesc<float>(); // cuda channel descriptor for texture hipArray *a_in_img; hipMallocArray( &a_in_img, &ccd, width, height ); hipArray *a_fin_img; hipMallocArray( &a_fin_img, &ccd, width, height, hipArraySurfaceLoadStore ); // array for surface in hipArray *a_out_img; hipMallocArray( &a_out_img, &ccd, width, height, hipArraySurfaceLoadStore ); // array for surface out float *d_in_img = 0; hipMalloc( (void**)&d_in_img, sizeof(float)*ne ); float *d_out_img = 0; hipMalloc( (void**)&d_out_img, sizeof(float)*ne ); int fft_width = width/2+1; Vector< float, KERNEL_RADIUS*2+1 > kernel_gpu; for (int i=0; i<KERNEL_RADIUS*2+1; ++i) kernel_gpu[i] = kernel[i]; copy_to_symbol(c_width, width); copy_to_symbol(c_height, height); copy_to_symbol(c_kernel, kernel_gpu); copy_to_symbol(c_fft_width, fft_width); t_in_img.addressMode[0] = hipAddressModeClamp; t_in_img.addressMode[1] = hipAddressModeClamp; t_in_img.filterMode = hipFilterModePoint; t_in_img.normalized = false; if( !a_in_img || !a_fin_img || !a_out_img || !d_in_img || !d_out_img ) { fprintf( stderr, "!!! error!\n" ); return 1; } if( !res_out ) { printf( "done!\n[gauss] Computing in GPU ...\n" ); } if( !res_out ) { printf( "[gauss] Info: r = %d ; b = %dx%d\n", radius, WS, NW ); fflush(stdin); } //if( res_out ) { printf( "%d %d", width, height ); fflush(stdin); } float me = 0.f, mre = 0.f; // maximum error and maximum relative error //int w_ws = (width+WS-1)/WS, h_nw = (height+NW-1)/NW, w_hws = (width+HWS-1)/HWS, w_mnt = (width+MNT-1)/MNT; hipEvent_t start_device, stop_device; hipEventCreate(&start_device); hipEventCreate(&stop_device); //hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); #ifdef RUN_GAUSS_0 { // gauss-0 hipMemcpy( d_in_img, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice ); hipEventRecord( start_device, 0 ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { hipLaunchKernelGGL(( gauss0_rows< KERNEL_RADIUS >) , dim3(dim3((width+WS-1)/WS, (height+NW-1)/NW)), dim3(dim3(WS, NW)) , 0, 0, d_out_img, d_in_img ); hipLaunchKernelGGL(( gauss0_cols< KERNEL_RADIUS >) , dim3(dim3((width+WS-1)/WS, (height+NW-1)/NW)), dim3(dim3(WS, NW)) , 0, 0, d_in_img, d_out_img ); } hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); hipMemcpy( &h_out_img[0], d_in_img, sizeof(float)*ne, hipMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss0 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif #ifdef RUN_GAUSS_1 { // gauss-1 hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice ); hipEventRecord( start_device, 0 ); 
hipBindSurfaceToArray( f_out_img, a_out_img ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { hipBindTextureToArray( t_in_img, a_in_img ); hipLaunchKernelGGL(( gauss1_rows< KERNEL_RADIUS >) , dim3(dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1)), dim3(dim3(WS1, NW1)) , 0, 0, ); // fortunately output surface can be used as input texture afterwards hipBindTextureToArray( t_in_img, a_out_img ); hipLaunchKernelGGL(( gauss1_cols< KERNEL_RADIUS >) , dim3(dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1)), dim3(dim3(WS1, NW1)) , 0, 0, d_out_img ); } hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); hipUnbindTexture( t_in_img ); dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss1 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif hipDeviceSetCacheConfig( hipFuncCachePreferShared ); #ifdef RUN_GAUSS_2 { // gauss-2 hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice ); hipEventRecord( start_device, 0 ); hipBindSurfaceToArray( f_out_img, a_out_img ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { hipBindTextureToArray( t_in_img, a_in_img ); hipLaunchKernelGGL(( gauss2_rows< KERNEL_RADIUS >) , dim3(dim3((width+MNT-1)/MNT, height)), dim3(dim3(MNT, 1)) , 0, 0, ); hipBindTextureToArray( t_in_img, a_out_img ); hipLaunchKernelGGL(( gauss2_cols< KERNEL_RADIUS >) , dim3(dim3((width+WS2-1)/WS2, (height+NW2-1)/NW2)), dim3(dim3(WS2, NW2)) , 0, 0, d_out_img ); } hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); hipUnbindTexture( t_in_img ); dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss2 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_GPU ); fflush(stdin); if( save_pgm( h_out_img, width, height, maxval, CACTUS_GPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" 
); return 1; } } #endif } #endif #ifdef RUN_GAUSS_3 { // gauss-3 hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice ); hipEventRecord( start_device, 0 ); hipBindTextureToArray( t_in_img, a_in_img ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { hipLaunchKernelGGL(( gauss3< KERNEL_RADIUS >) , dim3(dim3((width+WS3-1)/WS3, (height+NW3-1)/NW3)), dim3(dim3(WS3, NW3)) , 0, 0, d_out_img ); } hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); hipUnbindTexture( t_in_img ); dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss3 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif #ifdef RUN_GAUSS_FFT { // gauss-fft dvector< float > d_img( h_in_img ); int fftW = width/2+1, fftH = height; dvector< float2 > d_img_hat( fftW * fftH ); dim3 dim_block( WS, NW ), dim_grid( (fftW+1)/dim_block.x, (fftH+1)/dim_block.y ); hipfftHandle planFwd, planInv; hipfftPlan2d(&planFwd, width, height, HIPFFT_R2C); hipfftPlan2d(&planInv, width, height, HIPFFT_C2R); hipEventRecord( start_device, 0 ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { hipfftExecR2C(planFwd, d_img, (hipfftComplex *)(float2 *)d_img_hat); apply_gauss_hat< KERNEL_RADIUS >(d_img_hat, dim_grid, dim_block ); hipfftExecC2R(planInv, (hipfftComplex *)(float2 *)d_img_hat, d_img); } hipEventRecord( stop_device, 0 ); hipEventSynchronize( stop_device ); dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); hipfftDestroy(planFwd); hipfftDestroy(planInv); h_fft_img = to_cpu(d_img); for (int i=0; i<ne; ++i) h_fft_img[i] /= (float)ne; // gauss-fft calculates the exact convolution, so it might be different // from the cpu reference #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss-fft done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_FFT_GPU ); fflush(stdin); if( save_pgm( h_fft_img, width, height, maxval, CACTUS_FFT_GPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif } #endif #ifdef RUN_GAUSS_REC { // gauss-recursive void rec_gauss(float *h_img, int width, int height, float sigma); std::vector<float> h_img = h_in_img; rec_gauss(&h_img[0], width, height, radius/2); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_RECURSIVE_GPU ); fflush(stdin); if( save_pgm(h_img, width, height, maxval, CACTUS_RECURSIVE_GPU) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif } #endif //if( res_out ) printf( "\n" ); hipEventDestroy(start_device); hipEventDestroy(stop_device); hipFreeArray( a_in_img ); hipFreeArray( a_fin_img ); hipFreeArray( a_out_img ); hipFree( d_in_img ); hipFree( d_out_img ); return 0; }
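// compute_gauss() is declared in gauss_dir_cpu.h and is not shown in this file. Below is a
// minimal sketch of how such a normalized 1-D kernel could be built; the choice sigma =
// radius/2 is an assumption borrowed from apply_gauss_hat() above, and make_gauss_kernel is
// an illustrative name, not the header's actual API.
#include <cmath>
#include <vector>

static std::vector<float> make_gauss_kernel(int radius) {
    const double sigma = (radius > 0) ? radius / 2.0 : 1.0;  // assumed, not from the source
    std::vector<float> k(2 * radius + 1);
    double sum = 0.0;
    for (int i = -radius; i <= radius; ++i) {
        double w = std::exp(-(double)(i * i) / (2.0 * sigma * sigma));
        k[i + radius] = (float)w;
        sum += w;
    }
    for (float &w : k) w = (float)(w / sum);  // normalize so the weights sum to 1
    return k;
}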
8d51105abb107ef78cc48ab2ace6cd08684d6de0.cu
/** * Gaussian Blur Computation * * Maximo, Andre -- Sep, 2011 * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <cfloat> #include <cufft.h> #include <util/util.h> #include <util/symbol.h> //#include <util/chronos.h> #include <util/dvector.h> #include <util/timer.h> #include <util/recfilter.h> //#include "pgm.h" #include "gauss_dir_cpu.h" using namespace gpufilter; #define WS 32 ///< Warp Size #define NW 8 ///< Number of warps per block (with all warps computing) #define NB 8 ///< Number of blocks per SM #define WS1 32 ///< for gauss1 #define NW1 8 ///< for gauss1 #define NB1 8 ///< for gauss1 #define WS2 16 ///< for gauss2 #define NW2 8 ///< for gauss2 #define NB2 16 ///< for gauss2 #define WS3 32 ///< for gauss3 #define NW3 16 ///< for gauss3 #define NB3 3 ///< for gauss3 #define MNB 2 ///< Maximum number of blocks per SM #define MNT 1024 ///< Maximum number of threads per block (with MNB) unsigned RUN_TIMES=1024; int res_out; #define DEFAULT_RES_OUT 1 // res_out: 0 verbose; 1 perf; 2 error #define KERNEL_RADIUS 33 //#define KERNEL_RADIUS 47 //#define COMPUTE_IN_CPU // compute in cpu debug flag: cpu can be very slow.. //#define RUN_GAUSS_0 //#define RUN_GAUSS_1 //#define RUN_GAUSS_2 //#define RUN_GAUSS_3 //#define RUN_GAUSS_FFT //#define RUN_GAUSS_REC // get cactus with Diego Nehab //#define USE_CACTUS // use cactus debug flag: read/write file names below #define CACTUS_IN "cactus-1024.pgm" #define CACTUS_GPU "cactus-1024-direct-gpu.pgm" #define CACTUS_CPU "cactus-1024-direct-cpu.pgm" #define CACTUS_FFT_GPU "cactus-1024-fft-gpu.pgm" #define CACTUS_RECURSIVE_GPU "cactus-1024-recursive-gpu.pgm" /// Naming conventions are: c_ constant; t_ texture; g_ global memory; /// s_ shared memory; d_ device pointer; a_ cuda-array; p_ template /// parameter; f_ surface. 
__constant__ int c_width, c_height; __constant__ Vector< float, KERNEL_RADIUS * 2 + 1 > c_kernel; __constant__ int c_fft_width; texture< float, 2, cudaReadModeElementType > t_in_img; surface< void, 2 > f_in_img; surface< void, 2 > f_out_img; /// Memory types are: TMEM texture memory; SMEM shared memory; GMEM /// global memory ///------------------------- AUXILIARY FUNCTIONS ---------------------------- __device__ inline // auxiliary reflect function in device int d_reflect( const int& i, const int& n ) { if( i < 0 ) return (-i-1); else if( i >= n ) return (2*n-i-1); else return i; } __device__ inline // auxiliary clamp function in device int d_clamp( const int& i, const int& n ) { if( i < 0 ) return 0; else if( i >= n ) return n-1; else return i; } ///----------------- GAUSS-0 - TWO-PASS CONVOLUTION IN GMEM ----------------- template< int p_radius > __global__ __launch_bounds__( WS*NW, NB ) void gauss0_rows( float *g_out, const float *g_in ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty; if( row >= c_height or col >= c_width ) return; g_in += row*c_width; g_out += row*c_width + col; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += g_in[ d_clamp(col+k, c_width) ] * c_kernel[ k + p_radius ]; } *g_out = s; } template< int p_radius > __global__ __launch_bounds__( WS*NW, NB ) void gauss0_cols( float *g_out, const float *g_in ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty; if( row >= c_height or col >= c_width ) return; g_in += col; g_out += row*c_width + col; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += g_in[ d_clamp(row+k, c_height)*c_width ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-1 - TWO-PASS CONVOLUTION IN TMEM ----------------- template< int p_radius > __global__ __launch_bounds__( WS1*NW1, NB1 ) void gauss1_rows( void ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty; if( row >= c_height or col >= c_width ) return; float tu = col + .5f, tv = row + .5f, s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += tex2D( t_in_img, tu+k, tv ) * c_kernel[ k + p_radius ]; } surf2Dwrite( s, f_out_img, col*4, row, cudaBoundaryModeTrap ); // trap kills kernel if outside boundary } template< int p_radius > __global__ __launch_bounds__( WS1*NW1, NB1 ) void gauss1_cols( float *g_out ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty; if( row >= c_height or col >= c_width ) return; g_out += row*c_width + col; float tu = col + .5f, tv = row + .5f; float s = 0.f; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += tex2D( t_in_img, tu, tv+k ) * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-2 - TWO-PASS CONVOLUTION IN SMEM ----------------- template< int p_radius > __global__ __launch_bounds__( MNT, MNB ) void gauss2_rows( void ) { const int tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y; float tu = bx*MNT+tx + .5f, tv = by + .5f; float s = 0.f; volatile __shared__ float s_row[ MNT + p_radius*2 ]; s_row[ p_radius + tx ] = tex2D( t_in_img, tu, tv ); if( tx < p_radius ) s_row[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); else if( tx < 2*p_radius ) s_row[ MNT + tx ] = tex2D( t_in_img, tu - p_radius + MNT, tv ); __syncthreads(); if( bx*MNT+tx >= c_width ) return; #pragma unroll for (int k = 
-p_radius; k <= p_radius; ++k) { s += s_row[ p_radius + tx + k ] * c_kernel[ k + p_radius ]; } surf2Dwrite( s, f_out_img, (bx*MNT+tx)*4, by, cudaBoundaryModeTrap ); } template< int p_radius > __global__ __launch_bounds__( WS2*NW2, NB2 ) void gauss2_cols( float *g_out ) { const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y; float tu = bx*WS2+tx + .5f, tv = by*NW2+ty + .5f; float s = 0.f; volatile __shared__ float s_cols[ WS2 ][ NW2 + p_radius*2 + 1 ]; s_cols[ tx ][ p_radius + ty ] = tex2D( t_in_img, tu, tv ); if( p_radius <= NW2/2 ) { if( ty < p_radius ) s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius ); else if( ty < 2*p_radius ) s_cols[ tx ][ NW2 + ty ] = tex2D( t_in_img, tu, tv - p_radius + NW2 ); } else if( p_radius <= NW2 ) { if( ty < p_radius ) { s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius ); s_cols[ tx ][ p_radius + NW2 + ty ] = tex2D( t_in_img, tu, tv + NW2 ); } } else { for (int i = 0; i < (p_radius+NW2-1)/NW2; ++i) { int wy = i*NW2+ty; if( wy < p_radius ) { s_cols[ tx ][ wy ] = tex2D( t_in_img, tu, tv - p_radius + i*NW2 ); s_cols[ tx ][ p_radius + NW2 + wy ] = tex2D( t_in_img, tu, tv + NW2 + i*NW2 ); } } } __syncthreads(); if( bx*WS2+tx >= c_width or by*NW2+ty >= c_height ) return; g_out += (by*NW2+ty)*c_width + bx*WS2+tx; #pragma unroll for (int k = -p_radius; k <= p_radius; ++k) { s += s_cols[tx][ p_radius + ty + k ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-3 - ONE-PASS CONVOLUTION ----------------- template< int p_radius > __device__ void load_convolve_rows( volatile float *s_in, const int& tx, const float& tu, const float& tv ) { // load middle data s_in[ p_radius + tx ] = tex2D( t_in_img, tu, tv ); // load left and right data if( p_radius <= WS3/2 ) { if( tx < p_radius ) s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); else if( tx < p_radius*2 ) s_in[ WS3 + tx ] = tex2D( t_in_img, tu - p_radius + WS3, tv ); } else if( p_radius <= WS3 ) { if( tx < p_radius ) { s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv ); s_in[ p_radius + WS3 + tx ] = tex2D( t_in_img, tu + WS3, tv ); } } else { for (int i = 0; i < (p_radius+WS3-1)/WS3; ++i) { int wx = i*WS3+tx; if( wx < p_radius ) { s_in[ wx ] = tex2D( t_in_img, tu - p_radius + i*WS3, tv ); s_in[ p_radius + WS3 + wx ] = tex2D( t_in_img, tu + WS3 + i*WS3, tv ); } } } // convolve row float s = 0.f; for (int k = -p_radius; k <= p_radius; ++k) { s += s_in[ p_radius + tx + k ] * c_kernel[ k + p_radius ]; } s_in[ p_radius + tx ] = s; } template< int p_radius > __global__ __launch_bounds__( WS3*NW3, NB3 ) void gauss3( float *g_out ) { int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS3+tx, row = by*NW3+ty; if( row >= c_height or col >= c_width ) bx = -1; float tu = col + .5f, tv = row + .5f; volatile __shared__ float s_inblock[ NW3 + p_radius*2 ][ WS3 + p_radius*2 ]; // load middle data load_convolve_rows< p_radius >( &s_inblock[ p_radius + ty ][0], tx, tu, tv ); // load upper and lower data if( p_radius <= NW3/2 ) { if( ty < p_radius ) load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius ); else if( ty < p_radius*2 ) load_convolve_rows< p_radius >( &s_inblock[ NW3 + ty ][0], tx, tu, tv - p_radius + NW3 ); } else if( p_radius <= NW3 ) { if( ty < p_radius ) { load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius ); load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + ty ][0], tx, tu, tv + NW3 ); } } else { for (int i = 0; i < (p_radius+NW3-1)/NW3; ++i) { int wy = i*NW3+ty; if( wy < 
p_radius ) { load_convolve_rows< p_radius >( &s_inblock[ wy ][0], tx, tu, tv - p_radius + i*NW3 ); load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + wy ][0], tx, tu, tv + NW3 + i*NW3 ); } } } __syncthreads(); if( bx == -1 ) return; g_out += row*c_width + col; // convolve cols float s = 0.f; for (int k = -p_radius; k <= p_radius; ++k) { s += s_inblock[ p_radius + ty + k ][ p_radius + tx ] * c_kernel[ k + p_radius ]; } *g_out = s; } ///----------------- GAUSS-FFT - CONVOLUTION THROUGH FFT ----------------- __global__ __launch_bounds__( WS*NW, NB ) void apply_gauss_hat_kernel( float2 *img, float cte ) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; if( x >= c_fft_width or y >= c_height ) return; float wx = float(x)/c_fft_width/2.f, wy; if( y < c_height/2 ) wy = float(y)/c_height; else wy = float(y-c_height)/c_height; float g = exp( -cte * (wx*wx + wy*wy)/2.f ); float2 val = img[y*c_fft_width+x]; val.x *= g; val.y *= g; img[y*c_fft_width+x] = val; } template< int radius > void apply_gauss_hat( float2 *img_spectrum, const dim3& dim_grid, const dim3& dim_block ) { float sigma = (float)radius/2; float cte = sigma*2*M_PI; cte *= cte; apply_gauss_hat_kernel<<< dim_grid, dim_block >>>( img_spectrum, cte ); } ///---------------- MAIN ---------------- int main( int argc, char** argv ) { int width = 4096, height = 2048; if( argc > 2 && ( sscanf( argv[1], "%d", &width ) != 1 || sscanf( argv[2], "%d", &height ) != 1 || width < 1 || height < 1 ) ) { fprintf( stderr, "[error] Invalid argument: %s %s\n[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[1], argv[2], argv[0], width, height ); return 1; } else if( argc == 2 || argc > 4 ) { fprintf( stderr, "[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[0], width, height ); return 1; } res_out = ( argc == 4 ) ? res_out = argv[3][0]-48 : DEFAULT_RES_OUT; if( res_out != 0 && res_out != 1 && res_out != 2 ) { fprintf( stderr, "[error] Invalid output type %d\n\tValids are: 0 - no output; 1 - performance; 2 - error\n", res_out ); return 1; } int ne = width * height; // number of elements float dt = 0.f; std::vector< float > kernel; int radius = KERNEL_RADIUS; compute_gauss( kernel, radius ); std::vector< float > h_in_img; #ifdef USE_CACTUS int maxval; if( !res_out ) { printf( "[gauss] Reading %s ... ", CACTUS_IN ); fflush(stdin); if( load_pgm( h_in_img, width, height, maxval, CACTUS_IN ) ) printf( "done! (image: %d x %d)\n", width, height ); else { fprintf( stderr, "!!! error!" ); return 1; } ne = width * height; } else { h_in_img.resize( ne ); maxval = 255; srand( 1234 ); for (int i = 0; i < ne; ++i) h_in_img[i] = rand() / (double)RAND_MAX; } #else if( !res_out ) { printf( "[gauss] Generating random image (%dx%d) ... ", width, height ); fflush(stdin); } h_in_img.resize( ne ); srand( 1234 ); for (int i = 0; i < ne; ++i) h_in_img[i] = rand() / (double)RAND_MAX; if( !res_out ) printf( "done!\n" ); #endif if( !res_out ) { printf( "[gauss] Allocating memory in CPU ... " ); fflush(stdin); } std::vector< float > h_ref_img( ne ), h_out_img( ne ), h_fft_img( ne ); if( !res_out ) printf( "done!\n"); #ifdef COMPUTE_IN_CPU if( !res_out ) { printf( "[gauss] Computing in CPU ... 
" ); fflush(stdin); } Chronos te; // time elapsed computation te.reset(); gauss_cpu( &h_ref_img[0], &h_in_img[0], width, height, kernel, radius ); dt = te.elapsed(); if( !res_out ) printf( "done!\n[CPU] reference done in %gs @ %g MiP/s\n", dt, ne/(1024.*1024.*dt) ); #endif #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_CPU ); fflush(stdin); if( save_pgm( h_ref_img, width, height, maxval, CACTUS_CPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif if( !res_out ) { printf( "[gauss] Allocating memory in GPU ... " ); fflush(stdin); } cudaChannelFormatDesc ccd = cudaCreateChannelDesc<float>(); // cuda channel descriptor for texture cudaArray *a_in_img; cudaMallocArray( &a_in_img, &ccd, width, height ); cudaArray *a_fin_img; cudaMallocArray( &a_fin_img, &ccd, width, height, cudaArraySurfaceLoadStore ); // array for surface in cudaArray *a_out_img; cudaMallocArray( &a_out_img, &ccd, width, height, cudaArraySurfaceLoadStore ); // array for surface out float *d_in_img = 0; cudaMalloc( (void**)&d_in_img, sizeof(float)*ne ); float *d_out_img = 0; cudaMalloc( (void**)&d_out_img, sizeof(float)*ne ); int fft_width = width/2+1; Vector< float, KERNEL_RADIUS*2+1 > kernel_gpu; for (int i=0; i<KERNEL_RADIUS*2+1; ++i) kernel_gpu[i] = kernel[i]; copy_to_symbol(c_width, width); copy_to_symbol(c_height, height); copy_to_symbol(c_kernel, kernel_gpu); copy_to_symbol(c_fft_width, fft_width); t_in_img.addressMode[0] = cudaAddressModeClamp; t_in_img.addressMode[1] = cudaAddressModeClamp; t_in_img.filterMode = cudaFilterModePoint; t_in_img.normalized = false; if( !a_in_img || !a_fin_img || !a_out_img || !d_in_img || !d_out_img ) { fprintf( stderr, "!!! error!\n" ); return 1; } if( !res_out ) { printf( "done!\n[gauss] Computing in GPU ...\n" ); } if( !res_out ) { printf( "[gauss] Info: r = %d ; b = %dx%d\n", radius, WS, NW ); fflush(stdin); } //if( res_out ) { printf( "%d %d", width, height ); fflush(stdin); } float me = 0.f, mre = 0.f; // maximum error and maximum relative error //int w_ws = (width+WS-1)/WS, h_nw = (height+NW-1)/NW, w_hws = (width+HWS-1)/HWS, w_mnt = (width+MNT-1)/MNT; cudaEvent_t start_device, stop_device; cudaEventCreate(&start_device); cudaEventCreate(&stop_device); //cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); #ifdef RUN_GAUSS_0 { // gauss-0 cudaMemcpy( d_in_img, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice ); cudaEventRecord( start_device, 0 ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { gauss0_rows< KERNEL_RADIUS > <<< dim3((width+WS-1)/WS, (height+NW-1)/NW), dim3(WS, NW) >>>( d_out_img, d_in_img ); gauss0_cols< KERNEL_RADIUS > <<< dim3((width+WS-1)/WS, (height+NW-1)/NW), dim3(WS, NW) >>>( d_in_img, d_out_img ); } cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); cudaMemcpy( &h_out_img[0], d_in_img, sizeof(float)*ne, cudaMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss0 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif #ifdef RUN_GAUSS_1 { // gauss-1 cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice ); cudaEventRecord( start_device, 0 ); cudaBindSurfaceToArray( f_out_img, a_out_img ); for (int 
i = 0; i <= res_out*(RUN_TIMES-1); ++i) { cudaBindTextureToArray( t_in_img, a_in_img ); gauss1_rows< KERNEL_RADIUS > <<< dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1), dim3(WS1, NW1) >>>(); // fortunately output surface can be used as input texture afterwards cudaBindTextureToArray( t_in_img, a_out_img ); gauss1_cols< KERNEL_RADIUS > <<< dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1), dim3(WS1, NW1) >>>( d_out_img ); } cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaUnbindTexture( t_in_img ); dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss1 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ); #ifdef RUN_GAUSS_2 { // gauss-2 cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice ); cudaEventRecord( start_device, 0 ); cudaBindSurfaceToArray( f_out_img, a_out_img ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { cudaBindTextureToArray( t_in_img, a_in_img ); gauss2_rows< KERNEL_RADIUS > <<< dim3((width+MNT-1)/MNT, height), dim3(MNT, 1) >>>(); cudaBindTextureToArray( t_in_img, a_out_img ); gauss2_cols< KERNEL_RADIUS > <<< dim3((width+WS2-1)/WS2, (height+NW2-1)/NW2), dim3(WS2, NW2) >>>( d_out_img ); } cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaUnbindTexture( t_in_img ); dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss2 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_GPU ); fflush(stdin); if( save_pgm( h_out_img, width, height, maxval, CACTUS_GPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" 
); return 1; } } #endif } #endif #ifdef RUN_GAUSS_3 { // gauss-3 cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice ); cudaEventRecord( start_device, 0 ); cudaBindTextureToArray( t_in_img, a_in_img ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { gauss3< KERNEL_RADIUS > <<< dim3((width+WS3-1)/WS3, (height+NW3-1)/NW3), dim3(WS3, NW3) >>>( d_out_img ); } cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaUnbindTexture( t_in_img ); dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost ); #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss3 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); } #endif #ifdef RUN_GAUSS_FFT { // gauss-fft dvector< float > d_img( h_in_img ); int fftW = width/2+1, fftH = height; dvector< float2 > d_img_hat( fftW * fftH ); dim3 dim_block( WS, NW ), dim_grid( (fftW+1)/dim_block.x, (fftH+1)/dim_block.y ); cufftHandle planFwd, planInv; cufftPlan2d(&planFwd, width, height, CUFFT_R2C); cufftPlan2d(&planInv, width, height, CUFFT_C2R); cudaEventRecord( start_device, 0 ); for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) { cufftExecR2C(planFwd, d_img, (cufftComplex *)(float2 *)d_img_hat); apply_gauss_hat< KERNEL_RADIUS >(d_img_hat, dim_grid, dim_block ); cufftExecC2R(planInv, (cufftComplex *)(float2 *)d_img_hat, d_img); } cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f; if( res_out == 1 ) dt /= float(RUN_TIMES); cufftDestroy(planFwd); cufftDestroy(planInv); h_fft_img = to_cpu(d_img); for (int i=0; i<ne; ++i) h_fft_img[i] /= (float)ne; // gauss-fft calculates the exact convolution, so it might be different // from the cpu reference #ifdef COMPUTE_IN_CPU check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre); #endif if( res_out == 1 ) printf( " %g", ne/(1024.*1024.*dt) ); else if( res_out == 2 ) printf( " %g %g", me, mre ); else printf( "[GPU] gauss-fft done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) ); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_FFT_GPU ); fflush(stdin); if( save_pgm( h_fft_img, width, height, maxval, CACTUS_FFT_GPU ) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif } #endif #ifdef RUN_GAUSS_REC { // gauss-recursive void rec_gauss(float *h_img, int width, int height, float sigma); std::vector<float> h_img = h_in_img; rec_gauss(&h_img[0], width, height, radius/2); #ifdef USE_CACTUS if( !res_out ) { printf( "[gauss] Writing %s ... ", CACTUS_RECURSIVE_GPU ); fflush(stdin); if( save_pgm(h_img, width, height, maxval, CACTUS_RECURSIVE_GPU) ) printf( "done!\n" ); else { fprintf( stderr, "!!! error!" ); return 1; } } #endif } #endif //if( res_out ) printf( "\n" ); cudaEventDestroy(start_device); cudaEventDestroy(stop_device); cudaFreeArray( a_in_img ); cudaFreeArray( a_fin_img ); cudaFreeArray( a_out_img ); cudaFree( d_in_img ); cudaFree( d_out_img ); return 0; }
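// The systematic difference between this .cu file and its .hip twin above is the launch
// syntax rewritten by hipify: kernel<<<grid, block, shmem, stream>>>(args) becomes
// hipLaunchKernelGGL((kernel), grid, block, shmem, stream, args). A toy, self-contained
// example of that mapping follows; the kernel, sizes, and names are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 256;
    float *d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    // CUDA form (this file's convention):
    scale<<<dim3((n + 63) / 64), dim3(64), 0, 0>>>(d_x, 2.f, n);
    // hipify-generated form (the .hip twin's convention):
    //   hipLaunchKernelGGL((scale), dim3((n + 63) / 64), dim3(64), 0, 0, d_x, 2.f, n);

    cudaDeviceSynchronize();
    cudaFree(d_x);
    return 0;
}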
4c0e58753cf02693403e02bd54d6c041f5cc4b31.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <torch/extension.h> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <math.h> #include <algorithm> #include <stdlib.h> #include "cpu/vision.h" /*rle cuda kernels are cuda version of the corresponding cpu functions here https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c these are only a subset of rle kernels.*/ typedef unsigned int uint; typedef unsigned long siz; typedef unsigned char byte; //6144 is based on minimum shared memory size per SM //across all pytorch-supported GPUs. Need to use blocking //to avoid this restriction const int BUFFER_SIZE=6144; const int CNTS_SIZE=6144; __global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx, int poly_count, int anchor_count, float4 *anchor_data, int mask_size){ int tid = threadIdx.x; int block_jump = blockDim.x; int poly_id = blockIdx.x; int anchor_idx; for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){ if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break; } float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x; float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y; w = fmaxf(w, 1.0f); h = fmaxf(h, 1.0f); float ratio_h = ((float) mask_size) / h; float ratio_w = ((float) mask_size) / w; int poly_ptr_idx_start = poly_rel_idx[poly_id]; int poly_ptr_idx_end = poly_rel_idx[poly_id + 1]; double *poly_data_buf = dense_poly_data + poly_ptr_idx_start; int len = poly_ptr_idx_end - poly_ptr_idx_start; for (int j = tid; j < len; j += block_jump){ if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x); if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y); } } //merging masks happens on mask format, not RLE format. 
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size, int *per_anchor_poly_idx, int anchor_count){ int anchor_idx = blockIdx.x; int tid = threadIdx.x; int jump_block = blockDim.x; int mask_start_idx = per_anchor_poly_idx[anchor_idx]; int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx]; for(int j = tid; j < mask_size * mask_size; j += jump_block){ int transposed_pixel = (j % mask_size) * mask_size + j / mask_size; byte pixel = 0; for(int k = 0; k < num_of_masks_to_merge; k++){ if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1; if (pixel == 1) break; } masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel; } } /*cuda version of rleDecode function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; int m = num_of_cnts[poly_id]; uint *cnts_buf = cnts + CNTS_SIZE * poly_id; byte *mask_ptr = mask + poly_id * h * w; __shared__ uint shbuf1[CNTS_SIZE]; __shared__ uint shbuf2[CNTS_SIZE]; //initialize shbuf for scan. first element is 0 (exclusive scan) for (long i = tid; i < CNTS_SIZE; i += block_jump){ shbuf1[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; shbuf2[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; } __syncthreads(); //double buffering for scan int switch_buf = 0; for (int offset = 1; offset <= m; offset *= 2){ switch_buf = 1 - switch_buf; if(switch_buf == 0){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } }else if (switch_buf == 1){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1; //find which bin pixel j falls into , which determines the pixel value //use binary search for(int j = tid; j < h * w; j += block_jump){ int bin = 0; int min_idx = 0; int max_idx = m; int mid_idx = m / 2; while(max_idx > min_idx){ if(j > scanned_buf[mid_idx]) { min_idx = mid_idx+1; mid_idx = (min_idx + max_idx) / 2; } else if (j < scanned_buf[mid_idx]) { max_idx = mid_idx; mid_idx = (min_idx + max_idx) / 2; } else { mid_idx++; break; } } int k = mid_idx; byte pixel = k % 2 == 0 ? 
1 : 0; mask_ptr[j] = pixel; } } /*cuda version of rleFrPoly function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w, uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in, uint *b_in, int *num_of_cnts) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; long cnts_offset = poly_id * CNTS_SIZE; long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2; const double *xy = dense_coordinates + poly_rel_idx[poly_id]; int *x = x_in + poly_id * BUFFER_SIZE; int *y = y_in + poly_id * BUFFER_SIZE; int *u = u_in + poly_id * BUFFER_SIZE; int *v = v_in + poly_id * BUFFER_SIZE; uint *a = a_in + poly_id * BUFFER_SIZE; uint *b = b_in + poly_id * BUFFER_SIZE; /* upsample and get discrete points densely along entire boundary */ long j, m = 0; double scale = 5; __shared__ int shbuf1[BUFFER_SIZE]; __shared__ int shbuf2[BUFFER_SIZE]; for(long j = tid; j < BUFFER_SIZE; j += block_jump) { shbuf1[j] = 0; shbuf2[j] = 0; } for(long j = tid; j <= k; j += block_jump) x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5)); for(long j = tid; j <= k; j += block_jump) y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5)); __syncthreads(); for(int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = abs(xe - xs); dy = abs(ys - ye); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; dist = dx >= dy ? dx + 1 : dy + 1; shbuf1[j + 1] = dist; shbuf2[j + 1] = dist; } __syncthreads(); //block-wide exclusive prefix scan int switch_buf = 0; for (int offset = 1; offset <= k; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = __sad(xe, xs, 0); dy = __sad(ys, ye, 0); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; m = switch_buf == 0 ? shbuf2[j] : shbuf1[j]; if (dx >= dy) for (d = 0; d <= dx; d++) { /*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for double precision multiply, which we observe produces different results than CPU occasionally. To force flush-to-zero=False, we use __dmul_rn intrinsics function */ t = flip ? dx - d : d; u[m] = t + xs; v[m] = (int) (ys + __dmul_rn(s, t) + .5); m++; } else for (d = 0; d <= dy; d++) { t = flip ? dy - d : d; v[m] = t + ys; u[m] = (int) (xs + __dmul_rn(s, t) + .5); m++; } } __syncthreads(); m = switch_buf == 0 ? shbuf2[k] : shbuf1[k]; int k2 = m; __syncthreads(); double xd, yd; if (tid == 0) { shbuf1[tid] = 0; shbuf2[tid] = 0; } /* get points along y-boundary and downsample */ for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if (u[j] != u[j - 1]){ xd = (double) (u[j] < u[j-1] ? 
u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1 ) { shbuf1[j] = 0; shbuf2[j] = 0; continue; } yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); shbuf1[j] = 1; shbuf2[j] = 1; } else { shbuf1[j] = 0; shbuf2[j] = 0; } } } __syncthreads(); //exclusive prefix scan switch_buf = 0; for (int offset = 1; offset < k2; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if(u[j] != u[j - 1]){ xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;} yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1]; x[m] = (int) xd; y[m] = (int) yd; m++; } } } __syncthreads(); /* compute rle encoding given y-boundary points */ m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1]; int k3 = m; for (int j = tid; j <= k3; j += block_jump){ if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]); else a[j] = (uint)(h * w); } k3++; __syncthreads(); //run brick sort on a for k3+1 element //load k3+1 elements of a into shared memory for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j]; __syncthreads(); uint a_temp; for (int r = 0; r <= k3 / 2; r++){ int evenCas = k3 / 2; int oddCas = (k3 - 1) / 2; //start with 0, need (k3+1)/2 CAS for (int j = tid; j < evenCas; j += block_jump){ if (shbuf1[2 * j] > shbuf1[2 * j + 1]){ a_temp = shbuf1[2 * j]; shbuf1[2 * j]=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = a_temp; } } __syncthreads(); //start with 1 for (int j = tid; j < oddCas; j += block_jump){ if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){ a_temp=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = shbuf1[2 * j + 2]; shbuf1[2 * j + 2]=a_temp; } } __syncthreads(); } for(long j = tid; j < k3; j += block_jump) { if(j>0) shbuf2[j] = shbuf1[j - 1]; else shbuf2[j] = 0; } __syncthreads(); for(int j = tid; j < k3; j += block_jump){ shbuf1[j] -= shbuf2[j]; } __syncthreads(); uint *cnts_buf = cnts + cnts_offset; if (tid == 0){ j = m = 0; cnts_buf[m++] = shbuf1[j++]; while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else { j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; } num_of_cnts[poly_id] = m; } __syncthreads(); } at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size){ const int M = mask_size; assert (M < 32); //if M >=32, shared memory buffer size may not be //sufficient. 
Need to fix this by blocking float *d_anchor_data = anchors.data_ptr<float>(); int num_of_anchors = anchors.size(0); auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, at::CPU(at::kInt)); int num_of_poly = 0; for (int i = 0; i < num_of_anchors; i++){ *(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly; num_of_poly += polygons[i].size(); } *(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly; auto poly_rel_idx = at::empty({num_of_poly + 1}, at::CPU(at::kInt)); double *dense_poly_data = dense_vector.data_ptr<double>(); int start_idx = 0; int poly_count = 0; for(int i = 0; i < polygons.size(); i++){ for(int j=0; j < polygons[i].size(); j++) { *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; start_idx += polygons[i][j].size(0); poly_count++; } } *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte)); auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat)); at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt)); auto d_dense_vector = dense_vector.cuda(); auto d_per_anchor_poly_idx = per_anchor_poly_idx.cuda(); auto d_poly_rel_idx = poly_rel_idx.cuda(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( crop_and_scale_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_dense_vector.data_ptr<double>(), d_per_anchor_poly_idx.data_ptr<int>(), d_poly_rel_idx.data_ptr<int>(), poly_count, num_of_anchors, (float4*) d_anchor_data, M); //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem, //and occupancy is likely shmem capacity bound hipLaunchKernelGGL(( rle_fr_poly_cuda_kernel), dim3(num_of_poly), dim3(1024), 0, stream.stream(), d_dense_vector.data_ptr<double>(), d_poly_rel_idx.data_ptr<int>(), M, M, (uint*) d_cnts_t.data_ptr<int>(), d_x_t.data_ptr<int>(), d_y_t.data_ptr<int>(), d_u_t.data_ptr<int>(), d_v_t.data_ptr<int>(), (uint*) d_a_t.data_ptr<int>(), (uint*) d_b_t.data_ptr<int>(), d_num_of_counts_t.data_ptr<int>()); hipLaunchKernelGGL(( decode_rle_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_num_of_counts_t.data_ptr<int>(), (uint*) d_cnts_t.data_ptr<int>(), M, M, d_mask_t.data_ptr<byte>()); hipLaunchKernelGGL(( merge_masks_cuda_kernel), dim3(num_of_anchors), dim3(256), 0, stream.stream(), d_mask_t.data_ptr<byte>(), result.data_ptr<float>(), M, d_per_anchor_poly_idx.data_ptr<int>(), num_of_anchors); return result; }
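// decode_rle_cuda_kernel and rle_fr_poly_cuda_kernel above both rely on a double-buffered
// (ping-pong) Hillis-Steele prefix scan in shared memory, with each thread striding over the
// elements and a barrier between passes. A minimal single-block sketch of that pattern is
// given below as an inclusive scan with illustrative names; the kernels above additionally
// shift the initial load by one element to obtain the exclusive form.
#include <cuda_runtime.h>

__global__ void block_scan_demo(const int *in, int *out, int n) {
    // single block; n must not exceed N
    const int N = 1024;
    __shared__ int buf1[N];
    __shared__ int buf2[N];
    int tid = threadIdx.x, stride = blockDim.x;

    for (int j = tid; j < n; j += stride) { buf1[j] = in[j]; buf2[j] = in[j]; }
    __syncthreads();

    int switch_buf = 0;
    for (int offset = 1; offset < n; offset *= 2) {
        switch_buf = 1 - switch_buf;
        if (switch_buf == 1) {
            // read buf1, write buf2
            for (int j = tid; j < n; j += stride)
                buf2[j] = (j >= offset) ? buf1[j] + buf1[j - offset] : buf1[j];
        } else {
            // read buf2, write buf1
            for (int j = tid; j < n; j += stride)
                buf1[j] = (j >= offset) ? buf2[j] + buf2[j - offset] : buf2[j];
        }
        __syncthreads();
    }

    // the last-written buffer holds the inclusive scan
    int *res = (switch_buf == 1) ? buf2 : buf1;
    for (int j = tid; j < n; j += stride) out[j] = res[j];
}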
4c0e58753cf02693403e02bd54d6c041f5cc4b31.cu
/** * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <torch/extension.h> #include <iostream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <math.h> #include <algorithm> #include <stdlib.h> #include "cpu/vision.h" /*rle cuda kernels are cuda version of the corresponding cpu functions here https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c these are only a subset of rle kernels.*/ typedef unsigned int uint; typedef unsigned long siz; typedef unsigned char byte; //6144 is based on minimum shared memory size per SM //across all pytorch-supported GPUs. Need to use blocking //to avoid this restriction const int BUFFER_SIZE=6144; const int CNTS_SIZE=6144; __global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx, int poly_count, int anchor_count, float4 *anchor_data, int mask_size){ int tid = threadIdx.x; int block_jump = blockDim.x; int poly_id = blockIdx.x; int anchor_idx; for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){ if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break; } float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x; float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y; w = fmaxf(w, 1.0f); h = fmaxf(h, 1.0f); float ratio_h = ((float) mask_size) / h; float ratio_w = ((float) mask_size) / w; int poly_ptr_idx_start = poly_rel_idx[poly_id]; int poly_ptr_idx_end = poly_rel_idx[poly_id + 1]; double *poly_data_buf = dense_poly_data + poly_ptr_idx_start; int len = poly_ptr_idx_end - poly_ptr_idx_start; for (int j = tid; j < len; j += block_jump){ if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x); if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y); } } //merging masks happens on mask format, not RLE format. 
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size, int *per_anchor_poly_idx, int anchor_count){ int anchor_idx = blockIdx.x; int tid = threadIdx.x; int jump_block = blockDim.x; int mask_start_idx = per_anchor_poly_idx[anchor_idx]; int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx]; for(int j = tid; j < mask_size * mask_size; j += jump_block){ int transposed_pixel = (j % mask_size) * mask_size + j / mask_size; byte pixel = 0; for(int k = 0; k < num_of_masks_to_merge; k++){ if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1; if (pixel == 1) break; } masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel; } } /*cuda version of rleDecode function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; int m = num_of_cnts[poly_id]; uint *cnts_buf = cnts + CNTS_SIZE * poly_id; byte *mask_ptr = mask + poly_id * h * w; __shared__ uint shbuf1[CNTS_SIZE]; __shared__ uint shbuf2[CNTS_SIZE]; //initialize shbuf for scan. first element is 0 (exclusive scan) for (long i = tid; i < CNTS_SIZE; i += block_jump){ shbuf1[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; shbuf2[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; } __syncthreads(); //double buffering for scan int switch_buf = 0; for (int offset = 1; offset <= m; offset *= 2){ switch_buf = 1 - switch_buf; if(switch_buf == 0){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } }else if (switch_buf == 1){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1; //find which bin pixel j falls into , which determines the pixel value //use binary search for(int j = tid; j < h * w; j += block_jump){ int bin = 0; int min_idx = 0; int max_idx = m; int mid_idx = m / 2; while(max_idx > min_idx){ if(j > scanned_buf[mid_idx]) { min_idx = mid_idx+1; mid_idx = (min_idx + max_idx) / 2; } else if (j < scanned_buf[mid_idx]) { max_idx = mid_idx; mid_idx = (min_idx + max_idx) / 2; } else { mid_idx++; break; } } int k = mid_idx; byte pixel = k % 2 == 0 ? 
1 : 0; mask_ptr[j] = pixel; } } /*cuda version of rleFrPoly function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w, uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in, uint *b_in, int *num_of_cnts) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; long cnts_offset = poly_id * CNTS_SIZE; long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2; const double *xy = dense_coordinates + poly_rel_idx[poly_id]; int *x = x_in + poly_id * BUFFER_SIZE; int *y = y_in + poly_id * BUFFER_SIZE; int *u = u_in + poly_id * BUFFER_SIZE; int *v = v_in + poly_id * BUFFER_SIZE; uint *a = a_in + poly_id * BUFFER_SIZE; uint *b = b_in + poly_id * BUFFER_SIZE; /* upsample and get discrete points densely along entire boundary */ long j, m = 0; double scale = 5; __shared__ int shbuf1[BUFFER_SIZE]; __shared__ int shbuf2[BUFFER_SIZE]; for(long j = tid; j < BUFFER_SIZE; j += block_jump) { shbuf1[j] = 0; shbuf2[j] = 0; } for(long j = tid; j <= k; j += block_jump) x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5)); for(long j = tid; j <= k; j += block_jump) y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5)); __syncthreads(); for(int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = abs(xe - xs); dy = abs(ys - ye); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; dist = dx >= dy ? dx + 1 : dy + 1; shbuf1[j + 1] = dist; shbuf2[j + 1] = dist; } __syncthreads(); //block-wide exclusive prefix scan int switch_buf = 0; for (int offset = 1; offset <= k; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = __sad(xe, xs, 0); dy = __sad(ys, ye, 0); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; m = switch_buf == 0 ? shbuf2[j] : shbuf1[j]; if (dx >= dy) for (d = 0; d <= dx; d++) { /*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for double precision multiply, which we observe produces different results than CPU occasionally. To force flush-to-zero=False, we use __dmul_rn intrinsics function */ t = flip ? dx - d : d; u[m] = t + xs; v[m] = (int) (ys + __dmul_rn(s, t) + .5); m++; } else for (d = 0; d <= dy; d++) { t = flip ? dy - d : d; v[m] = t + ys; u[m] = (int) (xs + __dmul_rn(s, t) + .5); m++; } } __syncthreads(); m = switch_buf == 0 ? shbuf2[k] : shbuf1[k]; int k2 = m; __syncthreads(); double xd, yd; if (tid == 0) { shbuf1[tid] = 0; shbuf2[tid] = 0; } /* get points along y-boundary and downsample */ for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if (u[j] != u[j - 1]){ xd = (double) (u[j] < u[j-1] ? 
u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1 ) { shbuf1[j] = 0; shbuf2[j] = 0; continue; } yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); shbuf1[j] = 1; shbuf2[j] = 1; } else { shbuf1[j] = 0; shbuf2[j] = 0; } } } __syncthreads(); //exclusive prefix scan switch_buf = 0; for (int offset = 1; offset < k2; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if(u[j] != u[j - 1]){ xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;} yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1]; x[m] = (int) xd; y[m] = (int) yd; m++; } } } __syncthreads(); /* compute rle encoding given y-boundary points */ m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1]; int k3 = m; for (int j = tid; j <= k3; j += block_jump){ if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]); else a[j] = (uint)(h * w); } k3++; __syncthreads(); //run brick sort on a for k3+1 element //load k3+1 elements of a into shared memory for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j]; __syncthreads(); uint a_temp; for (int r = 0; r <= k3 / 2; r++){ int evenCas = k3 / 2; int oddCas = (k3 - 1) / 2; //start with 0, need (k3+1)/2 CAS for (int j = tid; j < evenCas; j += block_jump){ if (shbuf1[2 * j] > shbuf1[2 * j + 1]){ a_temp = shbuf1[2 * j]; shbuf1[2 * j]=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = a_temp; } } __syncthreads(); //start with 1 for (int j = tid; j < oddCas; j += block_jump){ if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){ a_temp=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = shbuf1[2 * j + 2]; shbuf1[2 * j + 2]=a_temp; } } __syncthreads(); } for(long j = tid; j < k3; j += block_jump) { if(j>0) shbuf2[j] = shbuf1[j - 1]; else shbuf2[j] = 0; } __syncthreads(); for(int j = tid; j < k3; j += block_jump){ shbuf1[j] -= shbuf2[j]; } __syncthreads(); uint *cnts_buf = cnts + cnts_offset; if (tid == 0){ j = m = 0; cnts_buf[m++] = shbuf1[j++]; while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else { j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; } num_of_cnts[poly_id] = m; } __syncthreads(); } at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size){ const int M = mask_size; assert (M < 32); //if M >=32, shared memory buffer size may not be //sufficient. 
Need to fix this by blocking float *d_anchor_data = anchors.data_ptr<float>(); int num_of_anchors = anchors.size(0); auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, at::CPU(at::kInt)); int num_of_poly = 0; for (int i = 0; i < num_of_anchors; i++){ *(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly; num_of_poly += polygons[i].size(); } *(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly; auto poly_rel_idx = at::empty({num_of_poly + 1}, at::CPU(at::kInt)); double *dense_poly_data = dense_vector.data_ptr<double>(); int start_idx = 0; int poly_count = 0; for(int i = 0; i < polygons.size(); i++){ for(int j=0; j < polygons[i].size(); j++) { *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; start_idx += polygons[i][j].size(0); poly_count++; } } *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte)); auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat)); at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt)); auto d_dense_vector = dense_vector.cuda(); auto d_per_anchor_poly_idx = per_anchor_poly_idx.cuda(); auto d_poly_rel_idx = poly_rel_idx.cuda(); auto stream = at::cuda::getCurrentCUDAStream(); crop_and_scale_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_per_anchor_poly_idx.data_ptr<int>(), d_poly_rel_idx.data_ptr<int>(), poly_count, num_of_anchors, (float4*) d_anchor_data, M); //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem, //and occupancy is likely shmem capacity bound rle_fr_poly_cuda_kernel<<<num_of_poly, 1024, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_poly_rel_idx.data_ptr<int>(), M, M, (uint*) d_cnts_t.data_ptr<int>(), d_x_t.data_ptr<int>(), d_y_t.data_ptr<int>(), d_u_t.data_ptr<int>(), d_v_t.data_ptr<int>(), (uint*) d_a_t.data_ptr<int>(), (uint*) d_b_t.data_ptr<int>(), d_num_of_counts_t.data_ptr<int>()); decode_rle_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_num_of_counts_t.data_ptr<int>(), (uint*) d_cnts_t.data_ptr<int>(), M, M, d_mask_t.data_ptr<byte>()); merge_masks_cuda_kernel<<<num_of_anchors, 256, 0, stream.stream()>>>(d_mask_t.data_ptr<byte>(), result.data_ptr<float>(), M, d_per_anchor_poly_idx.data_ptr<int>(), num_of_anchors); return result; }
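The prefix sums inside decode_rle_cuda_kernel and rle_fr_poly_cuda_kernel use a block-wide Hillis-Steele scan that ping-pongs between two shared buffers (shbuf1/shbuf2), with the exclusive variant obtained by loading the input shifted down by one element. A minimal sketch of the same double-buffered scan, assuming a single block and at most 1024 elements, could look like this (names are illustrative only):

#include <cuda_runtime.h>

// Block-wide inclusive scan (Hillis-Steele) with double buffering in shared memory.
// For the exclusive scan used by the RLE kernels, load in[i - 1] (and 0 at i == 0) instead.
__global__ void block_scan_double_buffer(const int* in, int* out, int n)
{
    __shared__ int bufA[1024];
    __shared__ int bufB[1024];
    int* src = bufA;
    int* dst = bufB;
    for (int i = threadIdx.x; i < n; i += blockDim.x) src[i] = in[i];
    __syncthreads();
    for (int offset = 1; offset < n; offset *= 2) {
        for (int i = threadIdx.x; i < n; i += blockDim.x)
            dst[i] = (i >= offset) ? src[i] + src[i - offset] : src[i];
        __syncthreads();                  // all writes to dst finish before it becomes src
        int* t = src; src = dst; dst = t;
    }
    for (int i = threadIdx.x; i < n; i += blockDim.x) out[i] = src[i];
}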
28e537cb23722cf6754415f89396f290a764aeef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> s d c */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_z __global__ void magma_zlarfg_gpu_kernel( int n, hipDoubleComplex* dx0, hipDoubleComplex* dx, hipDoubleComplex *dtau, double *dxnorm ) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ hipDoubleComplex scale; __shared__ double xnorm; hipDoubleComplex dxi; if ( j < n-1) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 ) { *dtau = MAGMA_Z_ZERO; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) double alpha = *dx0; double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = (beta - alpha) / beta; *dx0 = beta; scale = 1 / (alpha - beta); #else hipDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); *dx0 = MAGMA_Z_MAKE(beta, 0.); alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); } /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ). Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. */ extern "C" void magma_zlarfg_gpu(int n, hipDoubleComplex *dx0, hipDoubleComplex *dx, hipDoubleComplex *dtau, double *dxnorm) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zlarfg_gpu_kernel), dim3(blocks), dim3(threads) , 0, 0, n, dx0, dx, dtau, dxnorm ); }
28e537cb23722cf6754415f89396f290a764aeef.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> s d c */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_z __global__ void magma_zlarfg_gpu_kernel( int n, cuDoubleComplex* dx0, cuDoubleComplex* dx, cuDoubleComplex *dtau, double *dxnorm ) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ cuDoubleComplex scale; __shared__ double xnorm; cuDoubleComplex dxi; if ( j < n-1) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 ) { *dtau = MAGMA_Z_ZERO; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) double alpha = *dx0; double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = (beta - alpha) / beta; *dx0 = beta; scale = 1 / (alpha - beta); #else cuDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); *dx0 = MAGMA_Z_MAKE(beta, 0.); alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); } /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ). Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. */ extern "C" void magma_zlarfg_gpu(int n, cuDoubleComplex *dx0, cuDoubleComplex *dx, cuDoubleComplex *dtau, double *dxnorm) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); magma_zlarfg_gpu_kernel<<< blocks, threads >>>( n, dx0, dx, dtau, dxnorm ); }
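In the real-precision branch of magma_zlarfg_gpu_kernel, thread 0 derives beta, tau and the scale factor for v from alpha = dx0 and the norm passed in through dxnorm (the xnorm == 0 case, where tau is set to zero, is handled separately in the kernel). A host-side restatement of just that scalar arithmetic, using names of our own choosing rather than any MAGMA API, is sketched below:

#include <math.h>

// Scalar part of the real-precision Householder reflector: given alpha = dx0 and
// xnorm (the norm handed to the kernel via dxnorm), produce beta, tau and the
// factor by which the trailing part of the vector is scaled so that v(1) = 1.
static void larfg_scalars(double alpha, double xnorm,
                          double* beta, double* tau, double* scale)
{
    double b = -copysign(xnorm, alpha);   // beta takes the opposite sign of alpha
    *tau   = (b - alpha) / b;             // tau = (beta - alpha) / beta
    *scale = 1.0 / (alpha - b);           // dx[j] is multiplied by this factor
    *beta  = b;                           // overwrites dx0 in the kernel
}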
1632c1ec4b0605fed526370c645d62a203c0f446.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <linalg/gemv.h> #include <rocblas.h> #include <math.h> #include <layers/multi_cross_layer.hpp> #include <linalg/binary_op.cuh> #include <linalg/gemm.cuh> #include <linalg/matrix_vector_op.cuh> #include <linalg/reduce.cuh> #include <prims/linalg/matrix_multiplication.cuh> #include <prims/cuda_utils.cuh> #include <utils.cuh> #include <utils.hpp> #include <vector> namespace HugeCTR { // kernels namespace { inline int calc_grid(int t, int b) { return (t - 1) / b + 1; } template <typename T> void matrix_vec_mul(Tensor2<T>& out, const Tensor2<T>& mat, const Tensor2<T>& vec, hipblasHandle_t cublas_handle, hipStream_t stream); template <> void matrix_vec_mul(Tensor2<float>& out, const Tensor2<float>& mat, const Tensor2<float>& vec, hipblasHandle_t cublas_handle, hipStream_t stream) { float* pout = out.get_ptr(); const float* pmat = mat.get_ptr(); const float* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0]); const int h = idim[0]; const int w = idim[1]; const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK(hipblasSetStream(cublas_handle, stream)); CUBLAS_CHECK(hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, h, 1, w, &alpha, pmat, w, pvec, w, &beta, pout, h)); } template <> void matrix_vec_mul(Tensor2<__half>& out, const Tensor2<__half>& mat, const Tensor2<__half>& vec, hipblasHandle_t cublas_handle, hipStream_t stream) { __half* pout = out.get_ptr(); const __half* pmat = mat.get_ptr(); const __half* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0]); const int h = idim[0]; const int w = idim[1]; const __half alpha = 1.0f; const __half beta = 0.0f; CUBLAS_CHECK(hipblasSetStream(cublas_handle, stream)); CUBLAS_CHECK(hipblasHgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, h, 1, w, &alpha, pmat, w, pvec, w, &beta, pout, h)); } template <typename T> void row_scaling(Tensor2<T>& o_mat, const Tensor2<T>& mat, const Tensor2<T>& vec, hipStream_t stream) { T* pout = o_mat.get_ptr(); const T* pmat = mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = o_mat.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && dim[0] == vec.get_dimensions()[0] && vec.get_dimensions()[1] == 1); assert(idim[0] == dim[0] && idim[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::matrixVectorOp(pout, pmat, pvec, h, w, false, true, [] __device__(T a, T b) { return a * b; }, stream); } template <typename T> void matrix_vec_add(Tensor2<T>& o_mat, const Tensor2<T>& mat, 
const Tensor2<T>& vec, hipStream_t stream) { T* pout = o_mat.get_ptr(); const T* pmat = mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = o_mat.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && dim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0] && idim[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::matrixVectorOp(pout, pmat, pvec, h, w, false, false, [] __device__(T a, T b) { return a + b; }, stream); } template <typename T> void matrix_add(Tensor2<T>& out_mat, const Tensor2<T>& mat_a, const Tensor2<T>& mat_b, hipStream_t stream) { T* pout = out_mat.get_ptr(); const T* pmat_a = mat_a.get_ptr(); const T* pmat_b = mat_b.get_ptr(); const auto& dim = out_mat.get_dimensions(); const auto& idim1 = mat_a.get_dimensions(); const auto& idim2 = mat_b.get_dimensions(); assert(idim1[0] == dim[0] && idim1[1] == dim[1]); assert(idim2[0] == dim[0] && idim2[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::binaryOp(pout, pmat_a, pmat_b, h * w, [] __device__(T a, T b) { return a + b; }, stream); } /** * compute dot product for each pair of the rows in the two matrix, */ template <typename T> __global__ void matrix_pair_mul_kernel(T* o_vec, const T* mat_a, int h, int w, const T* mat_b) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int wtid = tid % WARP_SIZE; // thread id in warp const int wid = tid / WARP_SIZE; // warp id const T* mat_a_with_offset = mat_a + wid * w; const T* mat_b_with_offset = mat_b + wid * w; if (wid < h) { T accum = 0.f; for (int i = wtid; i < w; i += WARP_SIZE) { accum += mat_a_with_offset[i] * mat_b_with_offset[i]; } T val = warpReduceSum(accum); if (wtid == 0) { o_vec[wid] = val; } } } template <typename T> void matrix_pair_mul(Tensor2<T>& o_vec, const Tensor2<T>& mat_a, const Tensor2<T>& mat_b, hipStream_t stream) { T* pout = o_vec.get_ptr(); const T* pmat_a = mat_a.get_ptr(); const T* pmat_b = mat_b.get_ptr(); const auto& dim = mat_a.get_dimensions(); const int h = dim[0]; const int w = dim[1]; assert(h == mat_b.get_dimensions()[0] && w == mat_a.get_dimensions()[1] && h == o_vec.get_dimensions()[0] && 1 == o_vec.get_dimensions()[1]); const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(h * WARP_SIZE, BLOCK_DIM); hipLaunchKernelGGL(( matrix_pair_mul_kernel), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, stream, pout, pmat_a, h, w, pmat_b); } template <typename T> __global__ void mm_1d(T* out_mat, const T* vec_a, int h, const T* vec_b, int w) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < h * w) { const int col = tid % w; const int row = tid / w; out_mat[tid] = vec_a[row] * vec_b[col]; } } template <typename T> void out_product(Tensor2<T>& out_mat, const Tensor2<T>& vec_a, const Tensor2<T>& vec_b, hipStream_t stream) { T* pout = out_mat.get_ptr(); const T* pvec_a = vec_a.get_ptr(); const T* pvec_b = vec_b.get_ptr(); const auto& dim = out_mat.get_dimensions(); const int h = dim[0]; const int w = dim[1]; assert(h == vec_a.get_dimensions()[0] && w == vec_b.get_dimensions()[1] && vec_a.get_dimensions()[1] == 1 && vec_b.get_dimensions()[0] == 1); const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(h * w, BLOCK_DIM); hipLaunchKernelGGL(( mm_1d), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, stream, pout, pvec_a, h, pvec_b, w); } /** * Each row in `mat` scale with the coresponding element in vec. and accum across rows * The length of vec should be h. 
* @param o_mat: hxw * @param mat: hxw * @param vec: hx1 */ template <typename T> __global__ void row_scaling_sum_kernel(T* out, const T* mat, int h, int w, const T* vec) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int wtid = tid % WARP_SIZE; // thread id in warp const int wid = tid / WARP_SIZE; // warp id if (wid < w) { T accum = 0.f; for (int i = wtid; i < h; i += WARP_SIZE) { const int col = wid; const int idx = i * w + col; accum += mat[idx] * vec[i]; } T val = warpReduceSum(accum); if (wtid == 0) { out[wid] += val; // using += here to enable regularization } } } template <typename T> void row_scaling_sum(Tensor2<T>& out, const Tensor2<T>& mat, const Tensor2<T>& vec, hipStream_t stream) { T* pout = out.get_ptr(); const T* pmat = mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[0] == vec.get_dimensions()[0] && vec.get_dimensions()[1] == 1); assert(idim[1] == dim[1]); const int h = idim[0]; const int w = idim[1]; const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(w * WARP_SIZE, BLOCK_DIM); // each col one warp hipLaunchKernelGGL(( row_scaling_sum_kernel), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, stream, pout, pmat, h, w, pvec); } template <typename T> void rows_sum(Tensor2<T>& out, const Tensor2<T>& mat, hipStream_t stream) { T* pout = out.get_ptr(); const T* pmat = mat.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2); assert(idim[1] == dim[1]); const int h = idim[0]; const int w = idim[1]; MLCommon::LinAlg::reduce(pout, pmat, h, w, (T)0, false, true, stream, false, [] __device__(T in, int i) { return in; }); } } // namespace /* * Equivalent TensorFlow Code: * def forward(x, k, b, layers): y = [] h = [] for i in range(layers): v = tf.linalg.matvec(x if i == 0 else y[i - 1], k[i]) v = tf.transpose(v) h.append(v) m = tf.multiply(x, v) m = tf.add(m, x if i == 0 else y[i - 1]) m = tf.add(m, b[i]) y.append(m) return y, h * */ template <typename T> void MultiCrossForwardFunctor<T>::operator()(hipStream_t stream, hipblasHandle_t cublas_handle, const Tensor2<T>& input_tensor, const Tensors2<T>& kernel_tensors, const Tensors2<T>& bias_tensors, Tensors2<T>& layer_output_tensors, Tensors2<T>& layer_hidden_tensors, int num_layers) const { for (int i = 0; i < num_layers; i++) { matrix_vec_mul(layer_hidden_tensors[i], i == 0 ? input_tensor : layer_output_tensors[i - 1], kernel_tensors[i], cublas_handle, stream); row_scaling(layer_output_tensors[i], input_tensor, layer_hidden_tensors[i], stream); matrix_add(layer_output_tensors[i], layer_output_tensors[i], i == 0 ? 
input_tensor : layer_output_tensors[i - 1], stream); matrix_vec_add(layer_output_tensors[i], layer_output_tensors[i], bias_tensors[i], stream); } } /* * Equivalent TensorFlow Code: * def backward(x, k, y, h, dy, layers): dx = tf.zeros(x.shape) dk = [] db = [] for i in reversed(range(layers)): dx = tf.add(dx, tf.multiply(dy, h[i])) dv = tf.expand_dims(tf.reduce_sum(tf.multiply(dy, x), 1), 1) dk.insert(0, tf.linalg.matvec(x if i == 0 else y[i - 1], tf.transpose(dv), transpose_a=True)) db.insert(0, tf.expand_dims(tf.reduce_sum(dy, 0), 0)) dy = tf.add(dy, tf.matmul(dv, k[i])) dx = tf.add(dx, dy) return dx, dk, db * */ template <typename T> void MultiCrossBackwardFunctor<T>::operator()( hipStream_t stream, const Tensor2<T>& input_tensor, const Tensors2<T>& kernel_tensors, const Tensors2<T>& layer_output_tensors, const Tensors2<T>& layer_hidden_tensors, const Tensor2<T>& grad_tensor, Tensor2<T>& output_tensor, Tensors2<T>& kernel_output_tensors, Tensors2<T>& bias_output_tensors, Tensor2<T>& tmp_vec_tensor, Tensor2<T> tmp_mat_tensors[], int num_layers) const { hipMemsetAsync(tmp_mat_tensors[2].get_ptr(), 0, tmp_mat_tensors[2].get_size_in_bytes(), stream); for (int i = num_layers - 1; i >= 0; i--) { row_scaling(tmp_mat_tensors[0], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], layer_hidden_tensors[i], stream); matrix_add(tmp_mat_tensors[2], tmp_mat_tensors[2], tmp_mat_tensors[0], stream); matrix_pair_mul(tmp_vec_tensor, i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], input_tensor, stream); row_scaling_sum(kernel_output_tensors[i], i == 0 ? input_tensor : layer_output_tensors[i - 1], tmp_vec_tensor, stream); rows_sum(bias_output_tensors[i], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], stream); out_product(tmp_mat_tensors[0], tmp_vec_tensor, kernel_tensors[i], stream); matrix_add(tmp_mat_tensors[1], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], tmp_mat_tensors[0], stream); } matrix_add(output_tensor, tmp_mat_tensors[2], tmp_mat_tensors[1], stream); } template <typename T> MultiCrossLayer<T>::MultiCrossLayer(const std::shared_ptr<BufferBlock2<T>>& weight_buff, const std::shared_ptr<BufferBlock2<T>>& wgrad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, int num_layers, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), num_layers_(num_layers) { try { // check the in_tensor and out_tensor const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); // 1. two dim? if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } // 2. same dim? 
for (int i = 0; i < 2; i++) { if (in_tensor_dim[i] != out_tensor_dim[i]) { CK_THROW_(Error_t::WrongInput, "input and output tensor doesn't match"); } } size_t vec_length = in_tensor_dim[1]; size_t batchsize = in_tensor_dim[0]; // check num_lyaers if (num_layers < 1) { CK_THROW_(Error_t::WrongInput, "num_layers < 1"); } std::vector<size_t> weight_bias_dim = {1, vec_length}; for (int i = 0; i < num_layers; i++) { // setup weights { Tensor2<T> tensor; weight_buff->reserve(weight_bias_dim, &tensor); weights_.push_back(tensor); } // setup bias { Tensor2<T> tensor; weight_buff->reserve(weight_bias_dim, &tensor); weights_.push_back(tensor); } // setup weight gradient { Tensor2<T> tensor; wgrad_buff->reserve(weight_bias_dim, &tensor); wgrad_.push_back(tensor); } // setup bias gradient { Tensor2<T> tensor; wgrad_buff->reserve(weight_bias_dim, &tensor); wgrad_.push_back(tensor); } } in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); // setup blobs std::vector<size_t> blob_dim = {batchsize, vec_length}; blob_tensors_.push_back(in_tensor); for (int i = 0; i < num_layers - 1; i++) { Tensor2<T> tensor; blobs_buff->reserve(blob_dim, &tensor); blob_tensors_.push_back(tensor); } blob_tensors_.push_back(out_tensor); for (int i = 0; i < 3; i++) { blobs_buff->reserve(blob_dim, &tmp_mat_tensors_[i]); } std::vector<size_t> tmp_vec_dim = {batchsize, 1}; blobs_buff->reserve(tmp_vec_dim, &tmp_vec_tensor_); for (int i = 0; i < num_layers; i++) { Tensor2<T> tensor; blobs_buff->reserve(tmp_vec_dim, &tensor); vec_tensors_.push_back(tensor); } } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void MultiCrossLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); Tensors2<T> kernel_tensors; Tensors2<T> bias_tensors; Tensors2<T> output_tensors; Tensors2<T> hidden_tensors; for (int i = 0; i < num_layers_; i++) { kernel_tensors.push_back(weights_[2 * i]); bias_tensors.push_back(weights_[2 * i + 1]); } for (int i = 0; i < num_layers_; i++) { output_tensors.push_back(blob_tensors_[i + 1]); hidden_tensors.push_back(vec_tensors_[i]); } MultiCrossForwardFunctor<T>()(get_gpu().get_stream(), get_gpu().get_cublas_handle(), blob_tensors_[0], kernel_tensors, bias_tensors, output_tensors, hidden_tensors, num_layers_); } template <typename T> void MultiCrossLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); Tensors2<T> kernel_tensors; Tensors2<T> kernel_output_tensors; Tensors2<T> bias_output_tensors; Tensors2<T> forward_output_tensors; Tensors2<T> forward_hidden_tensors; for (int i = 0; i < num_layers_; i++) { kernel_tensors.push_back(weights_[2 * i]); kernel_output_tensors.push_back(wgrad_[2 * i]); bias_output_tensors.push_back(wgrad_[2 * i + 1]); forward_hidden_tensors.push_back(vec_tensors_[i]); } for (int i = 0; i < num_layers_ - 1; i++) { forward_output_tensors.push_back(blob_tensors_[i + 1]); } MultiCrossBackwardFunctor<T>()(get_gpu().get_stream(), blob_tensors_[0], kernel_tensors, forward_output_tensors, forward_hidden_tensors, blob_tensors_[num_layers_], blob_tensors_[0], kernel_output_tensors, bias_output_tensors, tmp_vec_tensor_, tmp_mat_tensors_, num_layers_); } template <typename T> std::unique_ptr<DataSimulator> MultiCrossLayer<T>::get_default_initializer(const int index) { const Tensor2<T>& in_tensor = in_tensors_[0]; const Tensor2<T>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; std::unique_ptr<DataSimulator> 
simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, bottom_dim, top_dim)); } else if (1 == index) { simu.reset(new ConstantDataSimulator(0.0f)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class MultiCrossLayer<float>; template class MultiCrossLayer<__half>; } // namespace HugeCTR
1632c1ec4b0605fed526370c645d62a203c0f446.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <linalg/gemv.h> #include <cublas_v2.h> #include <math.h> #include <layers/multi_cross_layer.hpp> #include <linalg/binary_op.cuh> #include <linalg/gemm.cuh> #include <linalg/matrix_vector_op.cuh> #include <linalg/reduce.cuh> #include <prims/linalg/matrix_multiplication.cuh> #include <prims/cuda_utils.cuh> #include <utils.cuh> #include <utils.hpp> #include <vector> namespace HugeCTR { // kernels namespace { inline int calc_grid(int t, int b) { return (t - 1) / b + 1; } template <typename T> void matrix_vec_mul(Tensor2<T>& out, const Tensor2<T>& mat, const Tensor2<T>& vec, cublasHandle_t cublas_handle, cudaStream_t stream); template <> void matrix_vec_mul(Tensor2<float>& out, const Tensor2<float>& mat, const Tensor2<float>& vec, cublasHandle_t cublas_handle, cudaStream_t stream) { float* pout = out.get_ptr(); const float* pmat = mat.get_ptr(); const float* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0]); const int h = idim[0]; const int w = idim[1]; const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK(cublasSetStream(cublas_handle, stream)); CUBLAS_CHECK(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, h, 1, w, &alpha, pmat, w, pvec, w, &beta, pout, h)); } template <> void matrix_vec_mul(Tensor2<__half>& out, const Tensor2<__half>& mat, const Tensor2<__half>& vec, cublasHandle_t cublas_handle, cudaStream_t stream) { __half* pout = out.get_ptr(); const __half* pmat = mat.get_ptr(); const __half* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0]); const int h = idim[0]; const int w = idim[1]; const __half alpha = 1.0f; const __half beta = 0.0f; CUBLAS_CHECK(cublasSetStream(cublas_handle, stream)); CUBLAS_CHECK(cublasHgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, h, 1, w, &alpha, pmat, w, pvec, w, &beta, pout, h)); } template <typename T> void row_scaling(Tensor2<T>& o_mat, const Tensor2<T>& mat, const Tensor2<T>& vec, cudaStream_t stream) { T* pout = o_mat.get_ptr(); const T* pmat = mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = o_mat.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && dim[0] == vec.get_dimensions()[0] && vec.get_dimensions()[1] == 1); assert(idim[0] == dim[0] && idim[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::matrixVectorOp(pout, pmat, pvec, h, w, false, true, [] __device__(T a, T b) { return a * b; }, stream); } template <typename T> void matrix_vec_add(Tensor2<T>& o_mat, const Tensor2<T>& mat, const Tensor2<T>& vec, cudaStream_t stream) { T* pout = o_mat.get_ptr(); const T* pmat = 
mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = o_mat.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && dim[1] == vec.get_dimensions()[1] && vec.get_dimensions()[0] == 1); assert(idim[0] == dim[0] && idim[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::matrixVectorOp(pout, pmat, pvec, h, w, false, false, [] __device__(T a, T b) { return a + b; }, stream); } template <typename T> void matrix_add(Tensor2<T>& out_mat, const Tensor2<T>& mat_a, const Tensor2<T>& mat_b, cudaStream_t stream) { T* pout = out_mat.get_ptr(); const T* pmat_a = mat_a.get_ptr(); const T* pmat_b = mat_b.get_ptr(); const auto& dim = out_mat.get_dimensions(); const auto& idim1 = mat_a.get_dimensions(); const auto& idim2 = mat_b.get_dimensions(); assert(idim1[0] == dim[0] && idim1[1] == dim[1]); assert(idim2[0] == dim[0] && idim2[1] == dim[1]); const int h = dim[0]; const int w = dim[1]; MLCommon::LinAlg::binaryOp(pout, pmat_a, pmat_b, h * w, [] __device__(T a, T b) { return a + b; }, stream); } /** * compute dot product for each pair of the rows in the two matrix, */ template <typename T> __global__ void matrix_pair_mul_kernel(T* o_vec, const T* mat_a, int h, int w, const T* mat_b) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int wtid = tid % WARP_SIZE; // thread id in warp const int wid = tid / WARP_SIZE; // warp id const T* mat_a_with_offset = mat_a + wid * w; const T* mat_b_with_offset = mat_b + wid * w; if (wid < h) { T accum = 0.f; for (int i = wtid; i < w; i += WARP_SIZE) { accum += mat_a_with_offset[i] * mat_b_with_offset[i]; } T val = warpReduceSum(accum); if (wtid == 0) { o_vec[wid] = val; } } } template <typename T> void matrix_pair_mul(Tensor2<T>& o_vec, const Tensor2<T>& mat_a, const Tensor2<T>& mat_b, cudaStream_t stream) { T* pout = o_vec.get_ptr(); const T* pmat_a = mat_a.get_ptr(); const T* pmat_b = mat_b.get_ptr(); const auto& dim = mat_a.get_dimensions(); const int h = dim[0]; const int w = dim[1]; assert(h == mat_b.get_dimensions()[0] && w == mat_a.get_dimensions()[1] && h == o_vec.get_dimensions()[0] && 1 == o_vec.get_dimensions()[1]); const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(h * WARP_SIZE, BLOCK_DIM); matrix_pair_mul_kernel<<<GRID_DIM, BLOCK_DIM, 0, stream>>>(pout, pmat_a, h, w, pmat_b); } template <typename T> __global__ void mm_1d(T* out_mat, const T* vec_a, int h, const T* vec_b, int w) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < h * w) { const int col = tid % w; const int row = tid / w; out_mat[tid] = vec_a[row] * vec_b[col]; } } template <typename T> void out_product(Tensor2<T>& out_mat, const Tensor2<T>& vec_a, const Tensor2<T>& vec_b, cudaStream_t stream) { T* pout = out_mat.get_ptr(); const T* pvec_a = vec_a.get_ptr(); const T* pvec_b = vec_b.get_ptr(); const auto& dim = out_mat.get_dimensions(); const int h = dim[0]; const int w = dim[1]; assert(h == vec_a.get_dimensions()[0] && w == vec_b.get_dimensions()[1] && vec_a.get_dimensions()[1] == 1 && vec_b.get_dimensions()[0] == 1); const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(h * w, BLOCK_DIM); mm_1d<<<GRID_DIM, BLOCK_DIM, 0, stream>>>(pout, pvec_a, h, pvec_b, w); } /** * Each row in `mat` scale with the coresponding element in vec. and accum across rows * The length of vec should be h. 
* @param o_mat: hxw * @param mat: hxw * @param vec: hx1 */ template <typename T> __global__ void row_scaling_sum_kernel(T* out, const T* mat, int h, int w, const T* vec) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int wtid = tid % WARP_SIZE; // thread id in warp const int wid = tid / WARP_SIZE; // warp id if (wid < w) { T accum = 0.f; for (int i = wtid; i < h; i += WARP_SIZE) { const int col = wid; const int idx = i * w + col; accum += mat[idx] * vec[i]; } T val = warpReduceSum(accum); if (wtid == 0) { out[wid] += val; // using += here to enable regularization } } } template <typename T> void row_scaling_sum(Tensor2<T>& out, const Tensor2<T>& mat, const Tensor2<T>& vec, cudaStream_t stream) { T* pout = out.get_ptr(); const T* pmat = mat.get_ptr(); const T* pvec = vec.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2 && idim[0] == vec.get_dimensions()[0] && vec.get_dimensions()[1] == 1); assert(idim[1] == dim[1]); const int h = idim[0]; const int w = idim[1]; const int BLOCK_DIM = 256; const int GRID_DIM = calc_grid(w * WARP_SIZE, BLOCK_DIM); // each col one warp row_scaling_sum_kernel<<<GRID_DIM, BLOCK_DIM, 0, stream>>>(pout, pmat, h, w, pvec); } template <typename T> void rows_sum(Tensor2<T>& out, const Tensor2<T>& mat, cudaStream_t stream) { T* pout = out.get_ptr(); const T* pmat = mat.get_ptr(); const auto& dim = out.get_dimensions(); const auto& idim = mat.get_dimensions(); assert(dim.size() == 2 && idim.size() == 2); assert(idim[1] == dim[1]); const int h = idim[0]; const int w = idim[1]; MLCommon::LinAlg::reduce(pout, pmat, h, w, (T)0, false, true, stream, false, [] __device__(T in, int i) { return in; }); } } // namespace /* * Equivalent TensorFlow Code: * def forward(x, k, b, layers): y = [] h = [] for i in range(layers): v = tf.linalg.matvec(x if i == 0 else y[i - 1], k[i]) v = tf.transpose(v) h.append(v) m = tf.multiply(x, v) m = tf.add(m, x if i == 0 else y[i - 1]) m = tf.add(m, b[i]) y.append(m) return y, h * */ template <typename T> void MultiCrossForwardFunctor<T>::operator()(cudaStream_t stream, cublasHandle_t cublas_handle, const Tensor2<T>& input_tensor, const Tensors2<T>& kernel_tensors, const Tensors2<T>& bias_tensors, Tensors2<T>& layer_output_tensors, Tensors2<T>& layer_hidden_tensors, int num_layers) const { for (int i = 0; i < num_layers; i++) { matrix_vec_mul(layer_hidden_tensors[i], i == 0 ? input_tensor : layer_output_tensors[i - 1], kernel_tensors[i], cublas_handle, stream); row_scaling(layer_output_tensors[i], input_tensor, layer_hidden_tensors[i], stream); matrix_add(layer_output_tensors[i], layer_output_tensors[i], i == 0 ? 
input_tensor : layer_output_tensors[i - 1], stream); matrix_vec_add(layer_output_tensors[i], layer_output_tensors[i], bias_tensors[i], stream); } } /* * Equivalent TensorFlow Code: * def backward(x, k, y, h, dy, layers): dx = tf.zeros(x.shape) dk = [] db = [] for i in reversed(range(layers)): dx = tf.add(dx, tf.multiply(dy, h[i])) dv = tf.expand_dims(tf.reduce_sum(tf.multiply(dy, x), 1), 1) dk.insert(0, tf.linalg.matvec(x if i == 0 else y[i - 1], tf.transpose(dv), transpose_a=True)) db.insert(0, tf.expand_dims(tf.reduce_sum(dy, 0), 0)) dy = tf.add(dy, tf.matmul(dv, k[i])) dx = tf.add(dx, dy) return dx, dk, db * */ template <typename T> void MultiCrossBackwardFunctor<T>::operator()( cudaStream_t stream, const Tensor2<T>& input_tensor, const Tensors2<T>& kernel_tensors, const Tensors2<T>& layer_output_tensors, const Tensors2<T>& layer_hidden_tensors, const Tensor2<T>& grad_tensor, Tensor2<T>& output_tensor, Tensors2<T>& kernel_output_tensors, Tensors2<T>& bias_output_tensors, Tensor2<T>& tmp_vec_tensor, Tensor2<T> tmp_mat_tensors[], int num_layers) const { cudaMemsetAsync(tmp_mat_tensors[2].get_ptr(), 0, tmp_mat_tensors[2].get_size_in_bytes(), stream); for (int i = num_layers - 1; i >= 0; i--) { row_scaling(tmp_mat_tensors[0], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], layer_hidden_tensors[i], stream); matrix_add(tmp_mat_tensors[2], tmp_mat_tensors[2], tmp_mat_tensors[0], stream); matrix_pair_mul(tmp_vec_tensor, i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], input_tensor, stream); row_scaling_sum(kernel_output_tensors[i], i == 0 ? input_tensor : layer_output_tensors[i - 1], tmp_vec_tensor, stream); rows_sum(bias_output_tensors[i], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], stream); out_product(tmp_mat_tensors[0], tmp_vec_tensor, kernel_tensors[i], stream); matrix_add(tmp_mat_tensors[1], i == num_layers - 1 ? grad_tensor : tmp_mat_tensors[1], tmp_mat_tensors[0], stream); } matrix_add(output_tensor, tmp_mat_tensors[2], tmp_mat_tensors[1], stream); } template <typename T> MultiCrossLayer<T>::MultiCrossLayer(const std::shared_ptr<BufferBlock2<T>>& weight_buff, const std::shared_ptr<BufferBlock2<T>>& wgrad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, int num_layers, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), num_layers_(num_layers) { try { // check the in_tensor and out_tensor const auto& in_tensor_dim = in_tensor.get_dimensions(); const auto& out_tensor_dim = out_tensor.get_dimensions(); // 1. two dim? if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } // 2. same dim? 
for (int i = 0; i < 2; i++) { if (in_tensor_dim[i] != out_tensor_dim[i]) { CK_THROW_(Error_t::WrongInput, "input and output tensor doesn't match"); } } size_t vec_length = in_tensor_dim[1]; size_t batchsize = in_tensor_dim[0]; // check num_lyaers if (num_layers < 1) { CK_THROW_(Error_t::WrongInput, "num_layers < 1"); } std::vector<size_t> weight_bias_dim = {1, vec_length}; for (int i = 0; i < num_layers; i++) { // setup weights { Tensor2<T> tensor; weight_buff->reserve(weight_bias_dim, &tensor); weights_.push_back(tensor); } // setup bias { Tensor2<T> tensor; weight_buff->reserve(weight_bias_dim, &tensor); weights_.push_back(tensor); } // setup weight gradient { Tensor2<T> tensor; wgrad_buff->reserve(weight_bias_dim, &tensor); wgrad_.push_back(tensor); } // setup bias gradient { Tensor2<T> tensor; wgrad_buff->reserve(weight_bias_dim, &tensor); wgrad_.push_back(tensor); } } in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); // setup blobs std::vector<size_t> blob_dim = {batchsize, vec_length}; blob_tensors_.push_back(in_tensor); for (int i = 0; i < num_layers - 1; i++) { Tensor2<T> tensor; blobs_buff->reserve(blob_dim, &tensor); blob_tensors_.push_back(tensor); } blob_tensors_.push_back(out_tensor); for (int i = 0; i < 3; i++) { blobs_buff->reserve(blob_dim, &tmp_mat_tensors_[i]); } std::vector<size_t> tmp_vec_dim = {batchsize, 1}; blobs_buff->reserve(tmp_vec_dim, &tmp_vec_tensor_); for (int i = 0; i < num_layers; i++) { Tensor2<T> tensor; blobs_buff->reserve(tmp_vec_dim, &tensor); vec_tensors_.push_back(tensor); } } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void MultiCrossLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); Tensors2<T> kernel_tensors; Tensors2<T> bias_tensors; Tensors2<T> output_tensors; Tensors2<T> hidden_tensors; for (int i = 0; i < num_layers_; i++) { kernel_tensors.push_back(weights_[2 * i]); bias_tensors.push_back(weights_[2 * i + 1]); } for (int i = 0; i < num_layers_; i++) { output_tensors.push_back(blob_tensors_[i + 1]); hidden_tensors.push_back(vec_tensors_[i]); } MultiCrossForwardFunctor<T>()(get_gpu().get_stream(), get_gpu().get_cublas_handle(), blob_tensors_[0], kernel_tensors, bias_tensors, output_tensors, hidden_tensors, num_layers_); } template <typename T> void MultiCrossLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); Tensors2<T> kernel_tensors; Tensors2<T> kernel_output_tensors; Tensors2<T> bias_output_tensors; Tensors2<T> forward_output_tensors; Tensors2<T> forward_hidden_tensors; for (int i = 0; i < num_layers_; i++) { kernel_tensors.push_back(weights_[2 * i]); kernel_output_tensors.push_back(wgrad_[2 * i]); bias_output_tensors.push_back(wgrad_[2 * i + 1]); forward_hidden_tensors.push_back(vec_tensors_[i]); } for (int i = 0; i < num_layers_ - 1; i++) { forward_output_tensors.push_back(blob_tensors_[i + 1]); } MultiCrossBackwardFunctor<T>()(get_gpu().get_stream(), blob_tensors_[0], kernel_tensors, forward_output_tensors, forward_hidden_tensors, blob_tensors_[num_layers_], blob_tensors_[0], kernel_output_tensors, bias_output_tensors, tmp_vec_tensor_, tmp_mat_tensors_, num_layers_); } template <typename T> std::unique_ptr<DataSimulator> MultiCrossLayer<T>::get_default_initializer(const int index) { const Tensor2<T>& in_tensor = in_tensors_[0]; const Tensor2<T>& out_tensor = out_tensors_[0]; float bottom_dim = in_tensor.get_dimensions()[1]; float top_dim = out_tensor.get_dimensions()[1]; std::unique_ptr<DataSimulator> 
simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, bottom_dim, top_dim)); } else if (1 == index) { simu.reset(new ConstantDataSimulator(0.0f)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } template class MultiCrossLayer<float>; template class MultiCrossLayer<__half>; } // namespace HugeCTR
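The TensorFlow pseudocode in the comments above describes one cross-layer forward step as a matrix-vector product with the kernel, a row-scaling of the original input by the resulting hidden vector, a skip connection to the previous layer output, and a bias add; MultiCrossForwardFunctor dispatches exactly these primitives per layer. A plain CPU reference of a single step over row-major arrays, written here only as a sketch (cross_layer_forward_ref is our own name, not a HugeCTR function), is:

// One cross-layer forward step: h[r] = dot(x_prev[r, :], k);
// y[r, c] = x0[r, c] * h[r] + x_prev[r, c] + b[c].
void cross_layer_forward_ref(const float* x0, const float* x_prev,
                             const float* k, const float* b,
                             float* h, float* y, int batch, int width)
{
    for (int r = 0; r < batch; ++r) {
        float v = 0.f;
        for (int c = 0; c < width; ++c)
            v += x_prev[r * width + c] * k[c];        // matrix-vector product
        h[r] = v;                                     // hidden scalar per row
        for (int c = 0; c < width; ++c)
            y[r * width + c] = x0[r * width + c] * v  // row scaling by h
                             + x_prev[r * width + c]  // skip connection
                             + b[c];                  // bias add
    }
}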
75196e2558db825a1bf4d6786fc2966c37941152.hip
// !!! This is a file automatically generated by hipify!!! /*The basic model of Danying's 2012 PNAS paper, without explicitly track the adhesion sites. Instead, using a rhoM dependent manner to handle the friction and then the traction force*/ //Feb-22-2018: Looks like for the diffusion term, spectral method does not work well. This is the only term that used finite differetiation in this code // may-23-2020: set the rhoa at front half and rhom at back half #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include "rocblas.h" #include "hiprand/hiprand.h" #include "hiprand/hiprand_kernel.h" #include <thrust/extrema.h> #include <thrust/device_ptr.h> #include <hipfft.h> #include <stdio.h> #include<stdlib.h> #include"math.h" #include<algorithm> #include<random> #include<time.h> #define MIN(a,b) ((a<b) ? a:b) #define MAX(a,b) ((a>b) ? a:b) # define M_PI 3.14159265358979323846 /* pi */ #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 using namespace std; //simulation grid set up const int Nx = 256; const int Ny = 256; dim3 blocks(Nx / BLOCK_SIZE_X, Ny / BLOCK_SIZE_Y); dim3 threadsperblock(BLOCK_SIZE_X, BLOCK_SIZE_Y); //grid box const float Lx = 25.0f, Ly = 25.0f; const float dx = 2.0f * Lx / (float)Nx, dy = 2.0f * Ly / (float)Ny; //time steps float dt = 2e-3; //relaxation Gamma extern __constant__ float d_Gamma = 0.4f; //phase-field width extern __constant__ float d_epsilon = 2.0f; const float h_epsilon = 2.0f; //runing time and recording time float max_time = 200.0f; const float record_itvl = 10.0f; //intial condition float rhoAinitial = 1.0f; float rhoMinitial = 0.3f; //error control float poisson_err_limit = 0.01f; int poisson_max_steps = 100; float exceed_val = 2.0f; //the exceed value in constructing poisson eq extern __constant__ float d_exceed_val = 2.0f; const float lamda = 1e-4; //some global parameters float r0_cell = 10.0f; //radius of cell extern __constant__ float d_tension = 20.0f; //tension per unit area extern __constant__ float d_bending = 0.0f; //bending energy extern __constant__ float d_Mv = 200.0f; //volume conservation strength extern __constant__ float d_areaMin = 350.0f; //min-area size extern __constant__ float d_areaMax = 600.0f; // max-area size float nu_vis = 1000.0f; //friction and traction parameters float xi_fric = 0.5f; extern __constant__ float d_xiM = 0.0f; extern __constant__ float d_expRhoM = 0.1f; extern __constant__ float d_thresA = 0.5f; float h_diffRhoA = 0.8f; float h_diffRhoM0 = 2.0f; float h_KdRhoM = 0.5f; float h_RhoAtot = 350.0f; float h_kbRhoA = 10.0f; float h_kaRhoA = 0.01f; float h_kcRhoA = 10.0f; float h_K2aRhoA = 1.0f; float h_etaRhoA = 2500.0f; float h_etaRhoM = 200.0f; //cufft and cublas handles hipfftHandle plan_R2C; hipfftHandle plan_C2R; hipblasHandle_t blashandle; hipblasHandle_t h_blashandle; //utility functions void Initialize(float* phi, float *rhog, float *rhor, float* ux, float* uy, float* x, float* y); void writetofile(const char *name, float *u, int Nx, int Ny); void fftcoeffs(float *d_d1x, float *d_d1y, float *d_d2x, float *d_d2y); __global__ void absarray(float *absay, float *ax, float *ay, int Nx, int Ny); __global__ void add3matrix(float *output, float *input1, float *input2, float *input3); __global__ void add2matrix(float *output, float *input1, float *input2); __global__ void minus_matrix(float *output, float *minuend, float *substractor, int Nx, int Ny); __global__ void get_error(float *max_error, float *ux_new, 
float *ux_old, float *uy_new, float *uy_old, int *idx_eux, int *idx_ux, int *idx_euy, int *idx_uy, int Nx, int Ny); __global__ void matrix_product(float *output, float *input1, float *input2, float alpha, int Nx, int Ny); __global__ void xdir_center(float *xc_sin, float *xc_cos, float *phi, float *phi_area, float *x, float Lx, int m, int n); //derivative functions void par_deriv1_fft(float *output, float *input, int dim, float *deriv_vec, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); void grad_fft(float *output_x, float *output_y, float *input, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); void lap_fft(float *output, float *input, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); void div_fft(float *output, float *input_x, float *input_y, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); __global__ void fftR2C_deriv1(hipfftComplex *input, hipfftComplex *output, int dim, float *coeff, int Nx, int Ny); __global__ void fftR2C_lap(hipfftComplex *input, hipfftComplex *output, float *coeff_x, float *coeff_y, int Nx, int Ny); __global__ void poissonR2C(hipfftComplex *input, hipfftComplex *output, float *coeff_x, float *coeff_y, float coeff0, float coeff2, int Nx, int Ny); void poisson_sol(float *sol, float *rhs, float coeff0, float coeff2, float *coeff_x, float *coeff_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); void div_pst_fft(float *output, float *c_pst, float *rho, float coeff_diff, float *deriv_vec_x2, float *deriv_vec_y2, float *buffer_x, float *buffer_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny); __global__ void div_pst_fd(float *output, float *c_pst, float *rho, float coeff_diff, float dx, float dy, int Nx, int Ny); //physical functions __global__ void curv(float *curv, float *phi_gradx, float *phi_grady, float *absdphi, int Nx, int Ny, float dx, float dy); __global__ void curv(float *curv, float *phi, float *absdphi, int m, int n, float dx, float dy); __global__ void phievolve(float *phi_new, float *phi_old, float *grad_x, float *grad_y, float *absdphi, float *lap_phi, float *curv, float *ux, float *uy, int Nx, int Ny, float dt); __global__ void potential_force(float *output, float *phi, float *lap_phi, float *bendcore, float *phiarea, float A0, float dx, float dy, int Nx, int Ny); __global__ void rhs_poisson(float *rhs_x, float *rhs_y, float *ptl_force, float *phi, float *dphix, float *dphiy, float *vis_x, float *vis_y, float *act_x, float *act_y, float *rhoA, float *rhoM, float *ux, float *uy, int Nx, int Ny); __global__ void div_vel_pre(float *div_x, float *div_y, int dim, float *phi, float nu, float *duxdx, float *duxdy, float *duydx, float *duydy, int Nx, int Ny); __global__ void bend_core(float *core, float *lap_phi, float *phi, int Nx, int Ny); __global__ void div_advect_pre(float *div_x, float *div_y, float *dnsty, float *rho, float *ux, float *uy, int Nx, int Ny); __global__ void diffRhoM(float *output, float *phi, float *rhoa, float Dm0, float Kd, int Nx, int Ny); __global__ void reactionRhoA(float *output, float *phi, float *rhoA, float *rhoAarea, float *phiarea, float rhoAtot, float kb, float K2a, float ka, float kc, float dx, float dy, int Nx, int Ny); __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float *react, float dt, float lamda, 
int Nx, int Ny); __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float dt, float lamda, int Nx, int Ny); __global__ void normRhoM(float *rho_new, float *rho_area_new, float *rho_are_old, int Nx, int Ny); __global__ void rhoa_right(float *rhoa, float *rhom, float *phi, float x_sin, float x_cos, float *x, float Lx, int m, int n); __global__ void activeForce(float *div_x, float *div_y, int dir, float *phi, float etaRhoA, float etaRhoM, float *rhoA, float *rhoM, float *dphix, float *dphiy, int Nx, int Ny); int main() { //initialize host FILE *ft = fopen("center_traj.txt", "w+"); float *h_x = (float *)malloc(Nx*sizeof(float)); float *h_y = (float *)malloc(Ny*sizeof(float)); float *h_phi = (float *)malloc(Nx*Ny*sizeof(float)); float *h_ux = (float *)malloc(Nx*Ny*sizeof(float)); float *h_uy = (float *)malloc(Nx*Ny*sizeof(float)); float *h_RhoA = (float *)malloc(Nx*Ny*sizeof(float)); float *h_RhoM = (float *)malloc(Nx*Ny*sizeof(float)); float h_xsin = 0.0f; float h_xcos = 0.0f; int file_record = 1; //initialize device float *d_x; hipMalloc((void **)&d_x, Nx*sizeof(float)); float *d_y; hipMalloc((void **)&d_y, Ny*sizeof(float)); float *d_phi_old; hipMalloc((void **)&d_phi_old, Nx*Ny*sizeof(float)); float *d_phi_new; hipMalloc((void **)&d_phi_new, Nx*Ny*sizeof(float)); float *d_ux_old; hipMalloc((void **)&d_ux_old, Nx*Ny*sizeof(float)); float *d_uy_old; hipMalloc((void **)&d_uy_old, Nx*Ny*sizeof(float)); float *d_ux_new; hipMalloc((void **)&d_ux_new, Nx*Ny*sizeof(float)); float *d_uy_new; hipMalloc((void **)&d_uy_new, Nx*Ny*sizeof(float)); float *d_RhoAold; hipMalloc((void **)&d_RhoAold, Nx*Ny*sizeof(float)); float *d_RhoAnew; hipMalloc((void **)&d_RhoAnew, Nx*Ny*sizeof(float)); float *d_RhoMold; hipMalloc((void **)&d_RhoMold, Nx*Ny*sizeof(float)); float *d_RhoMnew; hipMalloc((void **)&d_RhoMnew, Nx*Ny*sizeof(float)); //derivatives of velocities float *d_duxdx; hipMalloc((void **)&d_duxdx, Nx*Ny*sizeof(float)); float *d_duxdy; hipMalloc((void **)&d_duxdy, Nx*Ny*sizeof(float)); float *d_duydx; hipMalloc((void **)&d_duydx, Nx*Ny*sizeof(float)); float *d_duydy; hipMalloc((void **)&d_duydy, Nx*Ny*sizeof(float)); //divergence of the viscosity tensor float *d_div_x; hipMalloc((void **)&d_div_x, Nx*Ny*sizeof(float)); float *d_div_y; hipMalloc((void **)&d_div_y, Nx*Ny*sizeof(float)); //rhs of the poisson equation float *d_rhs_ux; hipMalloc((void **)&d_rhs_ux, Nx*Ny*sizeof(float)); float *d_rhs_uy; hipMalloc((void **)&d_rhs_uy, Nx*Ny*sizeof(float)); //substrate information //forces float *d_ActForceX; hipMalloc((void **)&d_ActForceX, Nx*Ny*sizeof(float)); float *d_ActForceY; hipMalloc((void **)&d_ActForceY, Nx*Ny*sizeof(float)); //forces in forms of potential * grad(phi) float *d_ptl_force; hipMalloc((void **)&d_ptl_force, Nx*Ny*sizeof(float)); //define gradient, laplacian, curvature, Gprime, absgrad float *d_dphix; hipMalloc((void **)&d_dphix, Nx*Ny*sizeof(float)); float *d_dphiy; hipMalloc((void **)&d_dphiy, Nx*Ny*sizeof(float)); float *d_phi_absgrad; hipMalloc((void **)&d_phi_absgrad, Nx*Ny*sizeof(float)); float *d_phi_lap; hipMalloc((void **)&d_phi_lap, Nx*Ny*sizeof(float)); float *d_phi_curv; hipMalloc((void **)&d_phi_curv, Nx*Ny*sizeof(float)); float *d_ftd1_x; hipMalloc((void **)&d_ftd1_x, Nx*sizeof(float)); float *d_ftd1_y; hipMalloc((void **)&d_ftd1_y, Ny*sizeof(float)); float *d_ftd2_x; hipMalloc((void **)&d_ftd2_x, Nx*sizeof(float)); float *d_ftd2_y; hipMalloc((void **)&d_ftd2_y, Ny*sizeof(float)); float *d_xc_sin; 
hipMalloc((void **)&d_xc_sin, Nx*Ny*sizeof(float)); float *d_xc_cos; hipMalloc((void **)&d_xc_cos, Nx*Ny*sizeof(float)); //temporary buffers float *d_temp_buffer; hipMalloc((void **)&d_temp_buffer, Nx*Ny*sizeof(float)); //cublas buffer hipfftComplex *d_fftR2C_buffer; hipMalloc((void **)&d_fftR2C_buffer, (Nx / 2 + 1)*Ny*sizeof(hipfftComplex)); //cufft R2C buffer as fft result hipfftComplex *d_fftC2R_buffer; hipMalloc((void **)&d_fftC2R_buffer, (Nx / 2 + 1)*Ny*sizeof(hipfftComplex)); //as C2R buffer float *d_buffer_x; hipMalloc((void **)&d_buffer_x, Nx*Ny*sizeof(float)); float *d_buffer_y; hipMalloc((void **)&d_buffer_y, Nx*Ny*sizeof(float)); float *d_advect_buffer; hipMalloc((void **)&d_advect_buffer, Nx*Ny*sizeof(float)); float *d_diffuse_buffer; hipMalloc((void **)&d_diffuse_buffer, Nx*Ny*sizeof(float)); float *d_reaction_buffer; hipMalloc((void **)&d_reaction_buffer, Nx*Ny*sizeof(float)); float *d_DiffRhoM; hipMalloc((void **)&d_DiffRhoM, Nx*Ny*sizeof(float)); //the rhoa-dependent rhom diffusion constant float *d_rhoAarea; hipMalloc((void **)&d_rhoAarea, sizeof(float)); //total active rhoA float *d_phiArea; hipMalloc((void **)&d_phiArea, sizeof(float)); //total area of phi float *d_rhoMoldArea; hipMalloc((void **)&d_rhoMoldArea, sizeof(float)); //total of rhoM old float *d_rhoMnewArea; hipMalloc((void **)&d_rhoMnewArea, sizeof(float)); //total of rhoM new float *d_bendcore; hipMalloc((void **)&d_bendcore,Nx*Ny*sizeof(float)); //the bending core //fft coefficients fftcoeffs(d_ftd1_x,d_ftd1_y, d_ftd2_x,d_ftd2_y); //handles hipfftPlan2d(&plan_R2C, Nx, Ny, HIPFFT_R2C); hipfftPlan2d(&plan_C2R, Nx, Ny, HIPFFT_C2R); hipblasCreate(&blashandle); hipblasSetPointerMode(blashandle, HIPBLAS_POINTER_MODE_DEVICE); //make the cublas return value to device hipblasCreate(&h_blashandle); //timer start hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //initialize the global parameters //r0_cell = 6.0f; Initialize(h_phi, h_RhoA, h_RhoM, h_ux, h_uy, h_x, h_y); //copy memory hipMemcpy(d_x, h_x, Nx*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_phi_old, h_phi, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_phi_new, h_phi, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_RhoAold, h_RhoA, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_RhoMold, h_RhoM, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_RhoAnew, h_RhoA, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_RhoMnew, h_RhoM, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_ux_old, h_ux, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_uy_old, h_uy, Nx*Ny*sizeof(float), hipMemcpyHostToDevice); //area size volume float A0; hipblasSasum(h_blashandle, Nx*Ny, d_phi_old, 1, &A0); A0 = A0 * dx * dy; //error parameters float max_error; int iter_steps; int *idx_maxerr_ux; hipMalloc((void **)& idx_maxerr_ux, sizeof(int)); int *idx_max_ux; hipMalloc((void **)&idx_max_ux, sizeof(int)); int *idx_maxerr_uy; hipMalloc((void **)& idx_maxerr_uy, sizeof(int)); int *idx_max_uy; hipMalloc((void **)&idx_max_uy, sizeof(int)); float *d_max_error; hipMalloc((void **)&d_max_error, sizeof(float)); //mass center grad_fft(d_dphix, d_dphiy, d_phi_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); hipLaunchKernelGGL(( absarray) , dim3(blocks), dim3(threadsperblock) , 0, 0, d_phi_absgrad, d_dphix, d_dphiy, Nx, Ny); lap_fft(d_phi_lap, d_phi_old, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, 
Nx, Ny); for (int steps = 0; steps<(int)(max_time / dt); steps++){ //curv <<<blocks, threadsperblock >>>(d_phi_curv, d_dphix, d_dphiy, d_phi_absgrad, Nx, Ny, dx,dy); hipLaunchKernelGGL(( curv) , dim3(blocks), dim3(threadsperblock) , 0, 0, d_phi_curv, d_phi_old, d_phi_absgrad, Nx, Ny, dx,dy); //solve phi hipLaunchKernelGGL(( phievolve) , dim3(blocks), dim3(threadsperblock) , 0, 0, d_phi_new, d_phi_old, d_dphix, d_dphiy, d_phi_absgrad, d_phi_lap, d_phi_curv, d_ux_old, d_uy_old, Nx, Ny, dt); //update derivatives grad_fft(d_dphix, d_dphiy, d_phi_new, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); hipLaunchKernelGGL(( absarray) , dim3(blocks), dim3(threadsperblock) , 0, 0, d_phi_absgrad, d_dphix, d_dphiy, Nx, Ny); lap_fft(d_phi_lap, d_phi_new, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //update rhoA and rhoM //rhoA advection //div_advect_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, d_phi_old, d_RhoAold, d_ux_old, d_uy_old, Nx, Ny); //div_fft(d_advect_buffer, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //rhoA diffusion //div_pst_fd<<<blocks, threadsperblock >>>(d_diffuse_buffer, d_phi_old, d_RhoAold, h_diffRhoA, dx, dy, Nx, Ny); //rhoA reaction hipblasSasum(blashandle, Nx * Ny, d_phi_old, 1, d_phiArea); //matrix_product<<<blocks, threadsperblock >>>(d_temp_buffer, d_RhoAold, d_phi_old, 1.0f, Nx, Ny); //hipblasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoAarea); //reactionRhoA<<<blocks, threadsperblock >>>(d_reaction_buffer, d_phi_old, d_RhoAold, d_rhoAarea, d_phiArea, h_RhoAtot, h_kbRhoA, h_K2aRhoA, h_kaRhoA, h_kcRhoA, dx, dy, Nx, Ny); //time evolve //RD_evolve<<<blocks, threadsperblock >>>(d_RhoAnew, d_phi_new, d_phi_old, d_RhoAold, d_advect_buffer, d_diffuse_buffer, d_reaction_buffer, dt, lamda, Nx, Ny); hipLaunchKernelGGL(( rhoa_right), dim3(blocks), dim3(threadsperblock), 0, 0, d_RhoAold, d_RhoMold, d_phi_old, h_xsin, h_xcos, d_x, Lx, Nx, Ny); hipLaunchKernelGGL(( rhoa_right), dim3(blocks), dim3(threadsperblock), 0, 0, d_RhoAnew, d_RhoMnew, d_phi_new, h_xsin, h_xcos, d_x, Lx, Nx, Ny); //rhoM advection //div_advect_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, d_phi_old, d_RhoMold, d_ux_old, d_uy_old, Nx, Ny); //div_fft(d_advect_buffer, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //rhoM diffusion //diffRhoM<<<blocks, threadsperblock >>>(d_DiffRhoM, d_phi_old, d_RhoAold, h_diffRhoM0, h_KdRhoM, Nx, Ny); //div_pst_fd<<<blocks, threadsperblock >>>(d_diffuse_buffer, d_DiffRhoM, d_RhoMold, 1.0f, dx, dy, Nx, Ny); //no rhoM reaction //RD_evolve<<<blocks, threadsperblock >>>(d_RhoMnew, d_phi_new, d_phi_old, d_RhoMold, d_advect_buffer, d_diffuse_buffer, dt, lamda, Nx, Ny); //normRhoM rhoM //matrix_product<<<blocks, threadsperblock>>>(d_temp_buffer, d_RhoMold, d_phi_old, dx * dy, Nx, Ny); //hipblasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoMoldArea); //matrix_product<<<blocks, threadsperblock>>>(d_temp_buffer, d_RhoMnew, d_phi_new, dx * dy, Nx, Ny); //hipblasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoMnewArea); //normRhoM<<<blocks, threadsperblock>>>(d_RhoMnew, d_rhoMnewArea, d_rhoMoldArea, Nx, Ny); //only rhoA at the right //update forces hipLaunchKernelGGL(( bend_core), dim3(blocks), dim3(threadsperblock), 0, 0, d_bendcore, d_phi_lap, d_phi_new, Nx, Ny); lap_fft(d_bendcore, d_bendcore, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); hipLaunchKernelGGL(( potential_force), dim3(blocks), dim3(threadsperblock) , 0, 0, d_ptl_force, d_phi_new, 
d_phi_lap, d_bendcore, d_phiArea, A0, dx, dy, Nx, Ny); //active force hipLaunchKernelGGL(( activeForce), dim3(blocks), dim3(threadsperblock), 0, 0, d_div_x, d_div_y, 1, d_phi_new, h_etaRhoA, h_etaRhoM, d_RhoAnew, d_RhoMnew, d_dphix, d_dphiy, Nx, Ny); div_fft(d_ActForceX, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); hipLaunchKernelGGL(( activeForce), dim3(blocks), dim3(threadsperblock), 0, 0, d_div_x, d_div_y, 2, d_phi_new, h_etaRhoA, h_etaRhoM, d_RhoAnew, d_RhoMnew, d_dphix, d_dphiy, Nx, Ny); div_fft(d_ActForceY, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); max_error = 10.0f; iter_steps = 0; while(max_error > poisson_err_limit && iter_steps < poisson_max_steps){ //grad(ux) grad_fft(d_duxdx, d_duxdy, d_ux_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //grad(uy) grad_fft(d_duydx, d_duydy, d_uy_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //grad(uz) //vel_div_x hipLaunchKernelGGL(( div_vel_pre), dim3(blocks), dim3(threadsperblock) , 0, 0, d_div_x, d_div_y, 1, d_phi_new, nu_vis, d_duxdx, d_duxdy, d_duydx, d_duydy, Nx, Ny); div_fft(d_buffer_x, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //vel_div_y hipLaunchKernelGGL(( div_vel_pre), dim3(blocks), dim3(threadsperblock) , 0, 0, d_div_x, d_div_y, 2, d_phi_new, nu_vis, d_duxdx, d_duxdy, d_duydx, d_duydy, Nx, Ny); div_fft(d_buffer_y, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); hipLaunchKernelGGL(( rhs_poisson), dim3(blocks), dim3(threadsperblock) , 0, 0, d_rhs_ux, d_rhs_uy, d_ptl_force, d_phi_new, d_dphix, d_dphiy, d_buffer_x, d_buffer_y, d_ActForceX, d_ActForceY, d_RhoAnew, d_RhoMnew, d_ux_old, d_uy_old, Nx, Ny); poisson_sol(d_ux_new, d_rhs_ux, xi_fric, nu_vis*exceed_val, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); poisson_sol(d_uy_new, d_rhs_uy, xi_fric, nu_vis*exceed_val, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //modify ux uy uz in regions of phi < 0.0001 not done //get error hipLaunchKernelGGL(( minus_matrix), dim3(blocks), dim3(threadsperblock) , 0, 0, d_buffer_x, d_ux_new, d_ux_old, Nx, Ny); hipLaunchKernelGGL(( minus_matrix), dim3(blocks), dim3(threadsperblock) , 0, 0, d_buffer_y, d_uy_new, d_uy_old, Nx, Ny); hipblasIsamax(blashandle, Nx*Ny, d_buffer_x, 1, idx_maxerr_ux); hipblasIsamax(blashandle, Nx*Ny, d_ux_new, 1, idx_max_ux); hipblasIsamax(blashandle, Nx*Ny, d_buffer_y, 1, idx_maxerr_uy); hipblasIsamax(blashandle, Nx*Ny, d_uy_new, 1, idx_max_uy); hipLaunchKernelGGL(( get_error), dim3(1), dim3(1), 0, 0, d_max_error, d_ux_new, d_ux_old, d_uy_new, d_uy_old, idx_maxerr_ux, idx_max_ux, idx_maxerr_uy, idx_max_uy, Nx, Ny); hipMemcpy(&max_error, d_max_error, sizeof(float), hipMemcpyDeviceToHost); iter_steps++; hipblasScopy(blashandle, Nx*Ny, d_ux_new, 1, d_ux_old, 1); hipblasScopy(blashandle, Nx*Ny, d_uy_new, 1, d_uy_old, 1); } if(max_error > poisson_err_limit && steps > 500){ printf("step %f wrong err %f\n", steps * dt, max_error); break; } //record center if(steps % 500 == 0){ hipLaunchKernelGGL(( xdir_center), dim3(blocks), dim3(threadsperblock), 0, 0, d_xc_sin, d_xc_cos, d_phi_new, d_phiArea, d_x, Lx, Nx, Ny); thrust::device_ptr<float> d_sin = thrust::device_pointer_cast(d_xc_sin); h_xsin = thrust::reduce(d_sin, d_sin + Nx*Ny); thrust::device_ptr<float> d_cos = thrust::device_pointer_cast(d_xc_cos); h_xcos = thrust::reduce(d_cos, d_cos + Nx*Ny); fprintf(ft, "%f %f\n", steps*dt, atan2(h_xsin, h_xcos)/M_PI*Lx); } 
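/* A worked form of the center readout above (a sketch; S, C and x_c are only notation for this
   comment, not variables in the code): xdir_center fills the phi-weighted samples of
   sin(pi*x/Lx) and cos(pi*x/Lx), the two thrust::reduce calls sum them into
       S = sum(phi*sin(pi*x/Lx))/sum(phi),   C = sum(phi*cos(pi*x/Lx))/sum(phi),
   and the logged x-center is the circular mean
       x_c = (Lx/pi) * atan2(S, C),
   which remains well defined when the cell wraps across the +/-Lx periodic boundary. */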
if(steps % (int)(record_itvl/dt)==0 && steps > 0){ char phi_name[50];sprintf(phi_name,"phi_profile_%d.txt", file_record); hipMemcpy(h_phi, d_phi_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(phi_name, h_phi, Nx, Ny); char rhoA_name[50];sprintf(rhoA_name,"rhoA_profile_%d.txt",file_record); hipMemcpy(h_RhoA, d_RhoAold, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(rhoA_name, h_RhoA, Nx, Ny); char rhoM_name[50];sprintf(rhoM_name,"rhoM_profile_%d.txt",file_record); hipMemcpy(h_RhoM, d_RhoMold, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(rhoM_name, h_RhoM, Nx, Ny); char ux_name[50];sprintf(ux_name,"ux_profile_%d.txt",file_record); hipMemcpy(h_ux, d_ux_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(ux_name, h_ux, Nx, Ny); char uy_name[50];sprintf(uy_name,"uy_profile_%d.txt",file_record); hipMemcpy(h_uy, d_uy_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(uy_name, h_uy, Nx, Ny); file_record++; } //swap old and new hipblasScopy(blashandle, Nx*Ny, d_phi_new, 1, d_phi_old, 1); hipblasScopy(blashandle, Nx*Ny, d_RhoAnew, 1, d_RhoAold, 1); hipblasScopy(blashandle, Nx*Ny, d_RhoMnew, 1, d_RhoMold, 1); } //final record of the shape char phi_name[50]; sprintf(phi_name,"phi_profile.txt", file_record); hipMemcpy(h_phi, d_phi_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(phi_name, h_phi, Nx, Ny); char rhoA_name[50]; sprintf(rhoA_name,"rhoA_profile.txt",file_record); hipMemcpy(h_RhoA, d_RhoAold, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(rhoA_name, h_RhoA, Nx, Ny); char rhoM_name[50]; sprintf(rhoM_name,"rhoM_profile.txt",file_record); hipMemcpy(h_RhoM, d_RhoMold, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(rhoM_name, h_RhoM, Nx, Ny); char ux_name[50];sprintf(ux_name,"ux_profile.txt",file_record); hipMemcpy(h_ux, d_ux_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(ux_name, h_ux, Nx, Ny); char uy_name[50];sprintf(uy_name,"uy_profile.txt",file_record); hipMemcpy(h_uy, d_uy_old, Nx*Ny*sizeof(float), hipMemcpyDeviceToHost); writetofile(uy_name, h_uy, Nx, Ny); //destroy plan and handles hipfftDestroy(plan_R2C); hipfftDestroy(plan_C2R); hipblasDestroy(blashandle); hipblasDestroy(h_blashandle); fclose(ft); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsetime; hipEventElapsedTime(&elapsetime, start, stop); std::printf("time need: %f s \n", elapsetime / 1000); return 0; } void Initialize(float* phi, float *rhoA, float *rhoM, float* ux, float* uy, float* x, float* y){ for (int i = 0; i < Nx; i++)x[i] = -Lx + 2 * Lx / (float)Nx*(float)i; for (int i = 0; i < Ny; i++)y[i] = -Ly + 2 * Ly / (float)Ny*(float)i; for (int i = 0; i < Nx; i++){ for (int j = 0; j < Ny; j++){ float dis = sqrt(x[i]*x[i] + y[j]*y[j]); int index = i + j * Nx; phi[index]=0.5f + 0.5f * tanh(3.0f*(r0_cell-dis) / h_epsilon); rhoA[index] = 0.0f; if(x[i] > 0.0f){ rhoA[index] = rhoAinitial * phi[index]; } rhoM[index] = rhoMinitial * phi[index]; ux[index] = 0.0f; uy[index] = 0.0f; } } } void writetofile(const char *name, float *u, int Nx, int Ny){ FILE *fp = fopen(name, "w+"); for (int j = 0; j < Ny; j++){ for (int i = 0; i < Nx; i++){ fprintf(fp, "%f ", u[i + j*Nx]); } fprintf(fp, "\n"); } fclose(fp); } __global__ void phievolve(float *phi_new, float *phi_old, float *grad_x, float *grad_y, float *absdphi, float *lap_phi, float *curv, float *ux, float *uy, int Nx, int Ny, float dt){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float dG = 36.0f * 
phi_old[index] * (1.0f - phi_old[index]) * (1.0f - 2.0f * phi_old[index]); phi_new[index] = phi_old[index] + dt*(-ux[index] * grad_x[index] - uy[index] * grad_y[index] + d_Gamma*(d_epsilon*lap_phi[index] - dG / d_epsilon + curv[index] * d_epsilon * absdphi[index])); } __global__ void div_vel_pre(float *div_x, float *div_y, int dim, float *phi, float nu, float *duxdx, float *duxdy, float *duydx, float *duydy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; if(dim == 1){ div_x[index] = 2.0f * nu * phi[index] * duxdx[index] - nu * d_exceed_val * duxdx[index]; //sigma_xx div_y[index] = nu * phi[index] * (duxdy[index] + duydx[index]) - nu * d_exceed_val * duxdy[index]; //sigma_xy } if(dim == 2){ div_x[index] = nu * phi[index] *(duydx[index] + duxdy[index]) - nu * d_exceed_val * duydx[index]; //sigma_yx div_y[index] = 2.0f * nu * phi[index] * duydy[index] - nu * d_exceed_val * duydy[index]; //sigma_yy } } __global__ void potential_force(float *output, float *phi, float *lap_phi, float *bendcore, float *phiarea, float A0, float dx, float dy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float dGphi = 36.0f * phi[index] * (1.0f - phi[index]) * (1.0f - 2.0f*phi[index]); float ddGphi = 36.0f*(1.0f - 6.0f*phi[index] + 6.0f*phi[index] * phi[index]); float core = lap_phi[index] - dGphi / d_epsilon / d_epsilon; float current_area = *phiarea * dx * dy; float delta_area = 0.0f; if(current_area < d_areaMin) delta_area = current_area - d_areaMin; if(current_area > d_areaMax) delta_area = current_area - d_areaMax; output[index] = -d_tension * d_epsilon * core + d_bending * d_epsilon * (bendcore[index] - ddGphi * core / d_epsilon / d_epsilon) + d_Mv * delta_area; } __global__ void rhs_poisson(float *rhs_x, float *rhs_y, float *ptl_force, float *phi, float *dphix, float *dphiy, float *vis_x, float *vis_y, float *act_x, float *act_y, float *rhoA, float *rhoM, float *ux, float *uy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rhs_x[index] = ptl_force[index] * dphix[index] + act_x[index] + vis_x[index]; rhs_y[index] = ptl_force[index] * dphiy[index] + act_y[index] + vis_y[index]; } __global__ void get_error(float *max_error, float *ux_new, float *ux_old, float *uy_new, float *uy_old, int *idx_eux, int *idx_ux, int *idx_euy, int *idx_uy, int Nx, int Ny){ float err_ux = abs(ux_new[*idx_eux - 1] - ux_old[*idx_eux - 1]) / abs(ux_new[*idx_ux - 1]); float err_uy = abs(uy_new[*idx_euy - 1] - uy_old[*idx_euy - 1]) / abs(uy_new[*idx_uy - 1]); *max_error = MAX(err_ux, err_uy); } __global__ void div_advect_pre(float *div_x, float *div_y, float *dnsty, float *rho, float *ux, float *uy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; //advection on the interface div_x[index] = dnsty[index] * rho[index] * ux[index]; div_y[index] = dnsty[index] * rho[index] * uy[index]; } __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float *react, float dt, float lamda, int Nx, int Ny){ //with reaction terms int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho_new[index] = 0.0f; if(dnsty_old[index] > lamda){ rho_new[index] = (2.0f * dnsty_old[index] - dnsty_new[index]) / 
dnsty_old[index] * rho_old[index] + dt / dnsty_old[index] *(diffuse[index] - advect[index] + react[index]); } } __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float dt, float lamda, int Nx, int Ny){ //without reaction terms int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho_new[index] = 0.0f; if(dnsty_old[index] > lamda){ rho_new[index] = (2.0f * dnsty_old[index] - dnsty_new[index]) / dnsty_old[index] * rho_old[index] + dt / dnsty_old[index] *(diffuse[index] - advect[index]); } } __global__ void normRhoM(float *rho, float *rho_area_new, float *rho_are_old, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho[index] = rho[index] * (*rho_are_old) / (*rho_area_new); } __global__ void diffRhoM(float *output, float *phi, float *rhoa, float Dm0, float Kd, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = phi[index] * Dm0 / (1.0f + rhoa[index] / Kd); } __global__ void reactionRhoA(float *output, float *phi, float *rhoA, float *rhoAarea, float *phiarea, float rhoAtot, float kb, float K2a, float ka, float kc, float dx, float dy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float rhoAcyt = (rhoAtot - (*rhoAarea) * dx * dy) / ((*phiarea) * dx * dy); output[index] = phi[index] * (kb * (rhoA[index] * rhoA[index] / (K2a * K2a + rhoA[index] * rhoA[index]) + ka) * rhoAcyt - kc * rhoA[index]); } __global__ void bend_core(float *core, float *lap_phi, float *phi, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; if (i < Nx && j < Ny){ core[index]=lap_phi[index]-36.0f*phi[index]*(1.0f-phi[index])*(1.0f-2.0f*phi[index])/d_epsilon/d_epsilon; } } __global__ void activeForce(float *div_x, float *div_y, int dir, float *phi, float etaRhoA, float etaRhoM, float *rhoA, float *rhoM, float *dphix, float *dphiy, int Nx, int Ny){ //active force by rhoA and rhoM int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; //x-dir if(dir == 1){ div_x[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphix[index] + etaRhoM * phi[index] * (1.0f - rhoA[index]); div_y[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphiy[index]; } //y-dir if(dir == 2){ div_x[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphiy[index]; div_y[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphiy[index] * dphiy[index] + etaRhoM * phi[index] * (1.0f - rhoA[index]); } } /*--------------------------------------------------------------------------------------*/ /*Utility functions*/ /*--------------------------------------------------------------------------------------*/ __global__ void matrix_product(float *output, float *input1, float *input2, float alpha, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = alpha * input1[index] * input2[index]; } __global__ void add3matrix(float *output, float *input1, float *input2, float *input3){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = 
i + j * Nx;
    output[index] = input1[index] + input2[index] + input3[index];
}
__global__ void add2matrix(float *output, float *input1, float *input2){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j * Nx;
    output[index] = input1[index] + input2[index];
}
__global__ void absarray(float *absay, float *ax, float *ay, int Nx, int Ny){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j*Nx;
    absay[index] = sqrt(ax[index] * ax[index] + ay[index] * ay[index]);
}
__global__ void minus_matrix(float *output, float *minuend, float *substractor, int Nx, int Ny){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j * Nx;
    output[index] = minuend[index] - substractor[index];
}
__global__ void xdir_center(float *xc_sin, float *xc_cos, float *phi, float *phi_area, float *x, float Lx, int m, int n){
    //return the matrices phi*sin(x*pi/Lx) and phi*cos(x*pi/Lx)
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j*blockDim.x*gridDim.x;
    if(i< m && j<n){
        xc_sin[index] = phi[index] / (*phi_area) * sin(x[i] * M_PI / Lx);
        xc_cos[index] = phi[index] / (*phi_area) * cos(x[i] * M_PI / Lx);
    }
}
__global__ void rhoa_right(float *rhoa, float *rhom, float *phi, float x_sin, float x_cos, float *x, float Lx, int m, int n){
    //put rhoA in the right half-plane relative to the x mass center
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j*m;
    if( i < m && j < n){
        rhoa[index] = 0.0f;
        rhom[index] = 1.0f;
        if(sin(x[i]*M_PI/Lx) * x_cos - cos(x[i]*M_PI/Lx)*x_sin > -0.5f && phi[index] > 1e-4){
            rhoa[index] = 1.0f;
            rhom[index] = 0.0f;
        }
    }
}
/*----------------------------------------------------------------------------------------------*/
/*Don't Change The Code After This Line if You Don't Want to Change the Differentiation Methods*/
/*----------------------------------------------------------------------------------------------*/
__global__ void curv(float *curv, float *phi_gradx, float *phi_grady, float *absdphi, int Nx, int Ny, float dx, float dy){
    //curvature from the gradient obtained by fft
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j*Nx;
    //f: forward; b: backward
    int fdx = i + 1; if (i == Nx - 1) fdx = 0; fdx = fdx + j*Nx;
    int bdx = i - 1; if (i == 0) bdx = Nx - 1; bdx = bdx + j*Nx;
    int fdy = j + 1; if (j == Ny - 1) fdy = 0; fdy = i + fdy*Nx;
    int bdy = j - 1; if (j == 0) bdy = Ny - 1; bdy = i + bdy*Nx;
    curv[index] = 0.0f;
    if (absdphi[index] >= 0.01 && absdphi[fdx] >= 0.01 && absdphi[bdx] >= 0.01 && absdphi[fdy] >= 0.01 && absdphi[bdy] >= 0.01)
        curv[index] = -(phi_gradx[fdx] / absdphi[fdx] - phi_gradx[bdx] / absdphi[bdx]) / 2.0f / dx - (phi_grady[fdy] / absdphi[fdy] - phi_grady[bdy] / absdphi[bdy]) / 2.0f / dy;
}
__global__ void curv(float *curv, float *phi, float *absdphi, int m, int n, float dx, float dy){
    //curvature computed directly from phi by finite differences
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    int j = threadIdx.y + blockIdx.y*blockDim.y;
    int index = i + j*m;
    if(i<m && j<n){
        curv[index] = 0.0f;
        if(absdphi[index]>0.001f){
            int r_dx = i + 1; if (i == m - 1) r_dx = 0;
            int l_dx = i - 1; if (i == 0) l_dx = m - 1;
            int u_dy = j + 1; if (j == n - 1) u_dy = 0;
            int d_dy = j - 1; if (j == 0) d_dy = n - 1;
            //get the 9 points needed for the curvature calculation
            float phi_i_j=phi[index]; //(i,j)
            float phi_ip1_j=phi[r_dx+j*m]; //(i+1,j)
            float
phi_im1_j=phi[l_dx+j*m]; //(i-1,j) float phi_i_jp1=phi[i+u_dy*m]; //(i,j+1) float phi_i_jm1=phi[i+d_dy*m]; //(i,j-1) float phi_ip1_jp1=phi[r_dx+u_dy*m]; //(i+1,j+1) float phi_ip1_jm1=phi[r_dx+d_dy*m]; //(i+1,j-1) float phi_im1_jp1=phi[l_dx+u_dy*m]; //(i-1,j+1) float phi_im1_jm1=phi[l_dx+d_dy*m]; //(i-1,j-1) float phix_iphalf_j = (phi_ip1_j - phi_i_j )/dx; float phix_imhalf_j = (phi_i_j - phi_im1_j)/dx; float phiy_i_jphalf = (phi_i_jp1 - phi_i_j )/dy; float phiy_i_jmhalf = (phi_i_j - phi_i_jm1)/dy; float phiy_iphalf_j = (phi_ip1_jp1 + phi_i_jp1 - phi_ip1_jm1 - phi_i_jm1 )/(4.0f*dy); float phiy_imhalf_j = (phi_i_jp1 + phi_im1_jp1 - phi_i_jm1 - phi_im1_jm1)/(4.0f*dy); float phix_i_jphalf = (phi_ip1_jp1 + phi_ip1_j - phi_im1_jp1 - phi_im1_j )/(4.0f*dx); float phix_i_jmhalf = (phi_ip1_j + phi_ip1_jm1 - phi_im1_j - phi_im1_jm1)/(4.0f*dx); float grad_phi_abs_iphalf_j = sqrt( phix_iphalf_j * phix_iphalf_j + phiy_iphalf_j * phiy_iphalf_j ); float grad_phi_abs_imhalf_j = sqrt( phix_imhalf_j * phix_imhalf_j + phiy_imhalf_j * phiy_imhalf_j); float grad_phi_abs_i_jphalf = sqrt( phix_i_jphalf * phix_i_jphalf + phiy_i_jphalf * phiy_i_jphalf ); float grad_phi_abs_i_jmhalf = sqrt( phix_i_jmhalf * phix_i_jmhalf + phiy_i_jmhalf * phiy_i_jmhalf ); curv[index] = - ( phix_iphalf_j / grad_phi_abs_iphalf_j - phix_imhalf_j / grad_phi_abs_imhalf_j )/dx - ( phiy_i_jphalf / grad_phi_abs_i_jphalf - phiy_i_jmhalf / grad_phi_abs_i_jmhalf )/dy; } } } void fftcoeffs(float *d_d1x, float *d_d1y, float *d_d2x, float *d_d2y){ //FFT coefficients, d_d1x, d_d1y are device 1-st derivatie grid matrixes, d_d2x, d_d2y are device 2-nd derivative matrix float *h_kx2 = (float *)malloc(Nx*sizeof(float)); for (int i = 0; i <= Nx / 2; i++) h_kx2[i] = (float)i * M_PI / Lx; for (int i = Nx / 2 + 1; i < Nx; i++) h_kx2[i] = ((float)i - (float)Nx) * M_PI / Lx; hipMemcpy(d_d2x, h_kx2, Nx*sizeof(float), hipMemcpyHostToDevice); float *h_ky2 = (float *)malloc(Ny*sizeof(float)); for (int i = 0; i <= Ny / 2; i++) h_ky2[i] = (float)i * M_PI / Ly; for (int i = Ny / 2 + 1; i < Ny; i++) h_ky2[i] = ((float)i - (float)Ny) * M_PI / Ly; hipMemcpy(d_d2y, h_ky2, Ny*sizeof(float), hipMemcpyHostToDevice); float *h_kx1 = (float *)malloc(Nx*sizeof(float)); for (int i = 0; i < Nx / 2; i++) h_kx1[i] = (float)i * M_PI / Lx; h_kx1[Nx / 2] = 0.0f; for (int i = Nx / 2 + 1; i < Nx; i++) h_kx1[i] = ((float)i - (float)Nx) * M_PI / Lx; hipMemcpy(d_d1x, h_kx1, Nx*sizeof(float), hipMemcpyHostToDevice); float *h_ky1 = (float *)malloc(Ny*sizeof(float)); for (int i = 0; i < Ny / 2; i++)h_ky1[i] = (float)i * M_PI / Ly; h_ky1[Ny / 2] = 0.0f; for (int i = Ny / 2 + 1; i < Ny; i++) h_ky1[i] = ((float)i - (float)Ny) * M_PI / Ly; hipMemcpy(d_d1y, h_ky1, Ny*sizeof(float), hipMemcpyHostToDevice); free(h_kx1); free(h_kx2); free(h_ky1); free(h_ky2); } __global__ void fftR2C_deriv1(hipfftComplex *input, hipfftComplex *output, int dim, float *coeff, int Nx, int Ny){ //x:dim=1; y:dim=2; int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*(Nx / 2 + 1); int dir[2]={i,j}; if (i < (Nx / 2 + 1) && j < Ny){ hipfftComplex temp = input[index]; output[index].x = -temp.y*coeff[dir[dim-1]] / (float)Nx / (float)Ny; output[index].y = temp.x*coeff[dir[dim-1]] / (float)Nx / (float)Ny; } } __global__ void fftR2C_lap(hipfftComplex *input, hipfftComplex *output, float *coeff_x, float *coeff_y, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*(Nx / 2 + 1); if (i < (Nx / 2 + 1) && 
j < Ny){ hipfftComplex temp = input[index]; float coeff = coeff_x[i] * coeff_x[i] + coeff_y[j] * coeff_y[j]; output[index].x = -temp.x*coeff / (float)Nx / (float)Ny; output[index].y = -temp.y*coeff / (float)Nx / (float)Ny; } } void grad_fft(float *output_x, float *output_y, float *input, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ hipfftExecR2C(plan_R2C, input, R2C_buffer); hipLaunchKernelGGL(( fftR2C_deriv1) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer,1, deriv_vec_x, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, output_x); hipLaunchKernelGGL(( fftR2C_deriv1) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer,2, deriv_vec_y, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, output_y); } void lap_fft(float *output, float *input, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ hipfftExecR2C(plan_R2C, input, R2C_buffer); hipLaunchKernelGGL(( fftR2C_lap) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer, deriv_vec_x,deriv_vec_y, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, output); } void div_fft(float *output, float *input_x, float *input_y, float *deriv_vec_x, float *deriv_vec_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ //in-space transform, notice that input_x and input_y will change values when function is called hipfftExecR2C(plan_R2C, input_x, R2C_buffer); hipLaunchKernelGGL(( fftR2C_deriv1) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer, 1, deriv_vec_x, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, input_x); hipfftExecR2C(plan_R2C, input_y, R2C_buffer); hipLaunchKernelGGL(( fftR2C_deriv1) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer, 2, deriv_vec_y, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, input_y); hipLaunchKernelGGL(( add2matrix), dim3(blocks), dim3(threadsperblock), 0, 0, output, input_x, input_y); } void div_pst_fft(float *output, float *c_pst, float *rho, float coeff_diff, float *deriv_vec_x2, float *deriv_vec_y2, float *buffer_x, float *buffer_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ //notice this differential process uses the 2-nd kind of derivative vector, corresponding to 2-nd order derivative grad_fft(buffer_x, buffer_y, rho, deriv_vec_x2, deriv_vec_y2, R2C_buffer, C2R_buffer, Nx, Ny); hipLaunchKernelGGL(( matrix_product), dim3(blocks), dim3(threadsperblock) , 0, 0, buffer_x, buffer_x, c_pst, coeff_diff, Nx, Ny); hipLaunchKernelGGL(( matrix_product), dim3(blocks), dim3(threadsperblock) , 0, 0, buffer_y, buffer_y, c_pst, coeff_diff, Nx, Ny); div_fft(output, buffer_x, buffer_y, deriv_vec_x2, deriv_vec_y2, R2C_buffer, C2R_buffer, Nx, Ny); } __global__ void div_pst_fd(float *output, float *c_pst, float *rho, float coeff_diff, float dx, float dy, int Nx, int Ny){ //finite differential of the diffusion term int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; int r_dx = i + 1; if (i == Nx - 1) r_dx = 0; r_dx = r_dx + j * Nx; int l_dx = i - 1; if (i == 0) l_dx = Nx - 1; l_dx = l_dx + j * Nx; int u_dy = j + 1; if (j == Ny - 1) u_dy = 0; u_dy = i + u_dy * Nx; int d_dy = j - 1; if (j == 0) d_dy = Ny - 1; d_dy = i + d_dy * Nx; output[index] = coeff_diff * ((c_pst[r_dx] + c_pst[index]) / 2.0f * (rho[r_dx] - rho[index]) / dx - (c_pst[l_dx] + c_pst[index]) / 2.0f * (rho[index] - rho[l_dx]) / dx) / dx + coeff_diff * ((c_pst[u_dy] + 
c_pst[index]) / 2.0f * (rho[u_dy] - rho[index]) / dy - (c_pst[d_dy] + c_pst[index]) / 2.0f * (rho[index] - rho[d_dy]) / dy) / dy; } void par_deriv1_fft(float *output, float *input, int dim, float *deriv_vec, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ hipfftExecR2C(plan_R2C, input, R2C_buffer); if(dim == 1){ hipLaunchKernelGGL(( fftR2C_deriv1) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer,1, deriv_vec, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, output); } if(dim == 2){ hipLaunchKernelGGL(( fftR2C_deriv1), dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer,2, deriv_vec, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, output); } } __global__ void poissonR2C(hipfftComplex *input, hipfftComplex *output, float *coeff_x, float *coeff_y, float coeff0, float coeff2, int Nx, int Ny){ //equation of coeff0*u - coeff2*Delta(u) = f int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*(Nx / 2 + 1); if (i < (Nx / 2 + 1) && j < Ny){ float coeff = coeff_x[i] * coeff_x[i] + coeff_y[j] * coeff_y[j]; hipfftComplex temp = input[index]; output[index].x = temp.x / (coeff0 + coeff2 * coeff) / (float)Nx / (float)Ny; output[index].y = temp.y / (coeff0 + coeff2 * coeff) / (float)Nx / (float)Ny; } } void poisson_sol(float *sol, float *rhs, float coeff0, float coeff2, float *coeff_x, float *coeff_y, hipfftComplex *R2C_buffer, hipfftComplex *C2R_buffer, int Nx, int Ny){ hipfftExecR2C(plan_R2C, rhs, R2C_buffer); hipLaunchKernelGGL(( poissonR2C) , dim3(blocks), dim3(threadsperblock) , 0, 0, R2C_buffer, C2R_buffer, coeff_x, coeff_y, coeff0, coeff2, Nx, Ny); hipfftExecC2R(plan_C2R, C2R_buffer, sol); } /*----------------------------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------------------------*/
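/* A worked form of the spectral solver above (a sketch; u, f, kx and ky are only notation for
   this comment): poisson_sol / poissonR2C solve the screened Poisson problem
       coeff0*u - coeff2*Laplacian(u) = f
   on the periodic box with one forward R2C FFT, the pointwise division
       u_hat(kx,ky) = f_hat(kx,ky) / (coeff0 + coeff2*(kx*kx + ky*ky)),
   and one inverse C2R FFT, where kx and ky are the wavenumbers (multiples of pi/Lx and pi/Ly)
   stored in the second-kind derivative vectors built by fftcoeffs, and the 1/(Nx*Ny) factor
   compensates for the unnormalized forward/inverse transforms. In the velocity iteration the
   call is poisson_sol(u, rhs, xi_fric, nu_vis*exceed_val, ...), i.e. coeff0 is the friction
   coefficient xi_fric and coeff2 is nu_vis*exceed_val. */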
75196e2558db825a1bf4d6786fc2966c37941152.cu
/*The basic model of Danying's 2012 PNAS paper, without explicitly tracking the adhesion sites.
  Instead, the friction (and hence the traction force) is handled in a rhoM-dependent manner.*/
//Feb-22-2018: it looks like the spectral method does not work well for the diffusion term; this is the only term that uses finite differentiation in this code
// may-23-2020: set the rhoA at the front half and rhoM at the back half
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "curand.h"
#include "curand_kernel.h"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <cufft.h>
#include <stdio.h>
#include<stdlib.h>
#include"math.h"
#include<algorithm>
#include<random>
#include<time.h>
#define MIN(a,b) ((a<b) ? a:b)
#define MAX(a,b) ((a>b) ? a:b)
# define M_PI 3.14159265358979323846 /* pi */
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
using namespace std;
//simulation grid set up
const int Nx = 256;
const int Ny = 256;
dim3 blocks(Nx / BLOCK_SIZE_X, Ny / BLOCK_SIZE_Y);
dim3 threadsperblock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
//grid box
const float Lx = 25.0f, Ly = 25.0f;
const float dx = 2.0f * Lx / (float)Nx, dy = 2.0f * Ly / (float)Ny;
//time steps
float dt = 2e-3;
//relaxation Gamma
extern __constant__ float d_Gamma = 0.4f;
//phase-field width
extern __constant__ float d_epsilon = 2.0f;
const float h_epsilon = 2.0f;
//running time and recording time
float max_time = 200.0f;
const float record_itvl = 10.0f;
//initial condition
float rhoAinitial = 1.0f;
float rhoMinitial = 0.3f;
//error control
float poisson_err_limit = 0.01f;
int poisson_max_steps = 100;
float exceed_val = 2.0f; //the exceed value in constructing the poisson eq
extern __constant__ float d_exceed_val = 2.0f;
const float lamda = 1e-4;
//some global parameters
float r0_cell = 10.0f; //radius of cell
extern __constant__ float d_tension = 20.0f; //tension per unit area
extern __constant__ float d_bending = 0.0f; //bending energy
extern __constant__ float d_Mv = 200.0f; //volume conservation strength
extern __constant__ float d_areaMin = 350.0f; //min-area size
extern __constant__ float d_areaMax = 600.0f; // max-area size
float nu_vis = 1000.0f;
//friction and traction parameters
float xi_fric = 0.5f;
extern __constant__ float d_xiM = 0.0f;
extern __constant__ float d_expRhoM = 0.1f;
extern __constant__ float d_thresA = 0.5f;
float h_diffRhoA = 0.8f;
float h_diffRhoM0 = 2.0f;
float h_KdRhoM = 0.5f;
float h_RhoAtot = 350.0f;
float h_kbRhoA = 10.0f;
float h_kaRhoA = 0.01f;
float h_kcRhoA = 10.0f;
float h_K2aRhoA = 1.0f;
float h_etaRhoA = 2500.0f;
float h_etaRhoM = 200.0f;
//cufft and cublas handles
cufftHandle plan_R2C;
cufftHandle plan_C2R;
cublasHandle_t blashandle;
cublasHandle_t h_blashandle;
//utility functions
void Initialize(float* phi, float *rhog, float *rhor, float* ux, float* uy, float* x, float* y);
void writetofile(const char *name, float *u, int Nx, int Ny);
void fftcoeffs(float *d_d1x, float *d_d1y, float *d_d2x, float *d_d2y);
__global__ void absarray(float *absay, float *ax, float *ay, int Nx, int Ny);
__global__ void add3matrix(float *output, float *input1, float *input2, float *input3);
__global__ void add2matrix(float *output, float *input1, float *input2);
__global__ void minus_matrix(float *output, float *minuend, float *substractor, int Nx, int Ny);
__global__ void get_error(float *max_error, float *ux_new, float *ux_old, float *uy_new, float *uy_old, int *idx_eux, int *idx_ux, int *idx_euy, int *idx_uy, int
Nx, int Ny); __global__ void matrix_product(float *output, float *input1, float *input2, float alpha, int Nx, int Ny); __global__ void xdir_center(float *xc_sin, float *xc_cos, float *phi, float *phi_area, float *x, float Lx, int m, int n); //derivative functions void par_deriv1_fft(float *output, float *input, int dim, float *deriv_vec, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); void grad_fft(float *output_x, float *output_y, float *input, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); void lap_fft(float *output, float *input, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); void div_fft(float *output, float *input_x, float *input_y, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); __global__ void fftR2C_deriv1(cufftComplex *input, cufftComplex *output, int dim, float *coeff, int Nx, int Ny); __global__ void fftR2C_lap(cufftComplex *input, cufftComplex *output, float *coeff_x, float *coeff_y, int Nx, int Ny); __global__ void poissonR2C(cufftComplex *input, cufftComplex *output, float *coeff_x, float *coeff_y, float coeff0, float coeff2, int Nx, int Ny); void poisson_sol(float *sol, float *rhs, float coeff0, float coeff2, float *coeff_x, float *coeff_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); void div_pst_fft(float *output, float *c_pst, float *rho, float coeff_diff, float *deriv_vec_x2, float *deriv_vec_y2, float *buffer_x, float *buffer_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny); __global__ void div_pst_fd(float *output, float *c_pst, float *rho, float coeff_diff, float dx, float dy, int Nx, int Ny); //physical functions __global__ void curv(float *curv, float *phi_gradx, float *phi_grady, float *absdphi, int Nx, int Ny, float dx, float dy); __global__ void curv(float *curv, float *phi, float *absdphi, int m, int n, float dx, float dy); __global__ void phievolve(float *phi_new, float *phi_old, float *grad_x, float *grad_y, float *absdphi, float *lap_phi, float *curv, float *ux, float *uy, int Nx, int Ny, float dt); __global__ void potential_force(float *output, float *phi, float *lap_phi, float *bendcore, float *phiarea, float A0, float dx, float dy, int Nx, int Ny); __global__ void rhs_poisson(float *rhs_x, float *rhs_y, float *ptl_force, float *phi, float *dphix, float *dphiy, float *vis_x, float *vis_y, float *act_x, float *act_y, float *rhoA, float *rhoM, float *ux, float *uy, int Nx, int Ny); __global__ void div_vel_pre(float *div_x, float *div_y, int dim, float *phi, float nu, float *duxdx, float *duxdy, float *duydx, float *duydy, int Nx, int Ny); __global__ void bend_core(float *core, float *lap_phi, float *phi, int Nx, int Ny); __global__ void div_advect_pre(float *div_x, float *div_y, float *dnsty, float *rho, float *ux, float *uy, int Nx, int Ny); __global__ void diffRhoM(float *output, float *phi, float *rhoa, float Dm0, float Kd, int Nx, int Ny); __global__ void reactionRhoA(float *output, float *phi, float *rhoA, float *rhoAarea, float *phiarea, float rhoAtot, float kb, float K2a, float ka, float kc, float dx, float dy, int Nx, int Ny); __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float *react, float dt, float lamda, int Nx, int Ny); __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float 
*advect, float *diffuse, float dt, float lamda, int Nx, int Ny); __global__ void normRhoM(float *rho_new, float *rho_area_new, float *rho_are_old, int Nx, int Ny); __global__ void rhoa_right(float *rhoa, float *rhom, float *phi, float x_sin, float x_cos, float *x, float Lx, int m, int n); __global__ void activeForce(float *div_x, float *div_y, int dir, float *phi, float etaRhoA, float etaRhoM, float *rhoA, float *rhoM, float *dphix, float *dphiy, int Nx, int Ny); int main() { //initialize host FILE *ft = fopen("center_traj.txt", "w+"); float *h_x = (float *)malloc(Nx*sizeof(float)); float *h_y = (float *)malloc(Ny*sizeof(float)); float *h_phi = (float *)malloc(Nx*Ny*sizeof(float)); float *h_ux = (float *)malloc(Nx*Ny*sizeof(float)); float *h_uy = (float *)malloc(Nx*Ny*sizeof(float)); float *h_RhoA = (float *)malloc(Nx*Ny*sizeof(float)); float *h_RhoM = (float *)malloc(Nx*Ny*sizeof(float)); float h_xsin = 0.0f; float h_xcos = 0.0f; int file_record = 1; //initialize device float *d_x; cudaMalloc((void **)&d_x, Nx*sizeof(float)); float *d_y; cudaMalloc((void **)&d_y, Ny*sizeof(float)); float *d_phi_old; cudaMalloc((void **)&d_phi_old, Nx*Ny*sizeof(float)); float *d_phi_new; cudaMalloc((void **)&d_phi_new, Nx*Ny*sizeof(float)); float *d_ux_old; cudaMalloc((void **)&d_ux_old, Nx*Ny*sizeof(float)); float *d_uy_old; cudaMalloc((void **)&d_uy_old, Nx*Ny*sizeof(float)); float *d_ux_new; cudaMalloc((void **)&d_ux_new, Nx*Ny*sizeof(float)); float *d_uy_new; cudaMalloc((void **)&d_uy_new, Nx*Ny*sizeof(float)); float *d_RhoAold; cudaMalloc((void **)&d_RhoAold, Nx*Ny*sizeof(float)); float *d_RhoAnew; cudaMalloc((void **)&d_RhoAnew, Nx*Ny*sizeof(float)); float *d_RhoMold; cudaMalloc((void **)&d_RhoMold, Nx*Ny*sizeof(float)); float *d_RhoMnew; cudaMalloc((void **)&d_RhoMnew, Nx*Ny*sizeof(float)); //derivatives of velocities float *d_duxdx; cudaMalloc((void **)&d_duxdx, Nx*Ny*sizeof(float)); float *d_duxdy; cudaMalloc((void **)&d_duxdy, Nx*Ny*sizeof(float)); float *d_duydx; cudaMalloc((void **)&d_duydx, Nx*Ny*sizeof(float)); float *d_duydy; cudaMalloc((void **)&d_duydy, Nx*Ny*sizeof(float)); //divergence of the viscosity tensor float *d_div_x; cudaMalloc((void **)&d_div_x, Nx*Ny*sizeof(float)); float *d_div_y; cudaMalloc((void **)&d_div_y, Nx*Ny*sizeof(float)); //rhs of the poisson equation float *d_rhs_ux; cudaMalloc((void **)&d_rhs_ux, Nx*Ny*sizeof(float)); float *d_rhs_uy; cudaMalloc((void **)&d_rhs_uy, Nx*Ny*sizeof(float)); //substrate information //forces float *d_ActForceX; cudaMalloc((void **)&d_ActForceX, Nx*Ny*sizeof(float)); float *d_ActForceY; cudaMalloc((void **)&d_ActForceY, Nx*Ny*sizeof(float)); //forces in forms of potential * grad(phi) float *d_ptl_force; cudaMalloc((void **)&d_ptl_force, Nx*Ny*sizeof(float)); //define gradient, laplacian, curvature, Gprime, absgrad float *d_dphix; cudaMalloc((void **)&d_dphix, Nx*Ny*sizeof(float)); float *d_dphiy; cudaMalloc((void **)&d_dphiy, Nx*Ny*sizeof(float)); float *d_phi_absgrad; cudaMalloc((void **)&d_phi_absgrad, Nx*Ny*sizeof(float)); float *d_phi_lap; cudaMalloc((void **)&d_phi_lap, Nx*Ny*sizeof(float)); float *d_phi_curv; cudaMalloc((void **)&d_phi_curv, Nx*Ny*sizeof(float)); float *d_ftd1_x; cudaMalloc((void **)&d_ftd1_x, Nx*sizeof(float)); float *d_ftd1_y; cudaMalloc((void **)&d_ftd1_y, Ny*sizeof(float)); float *d_ftd2_x; cudaMalloc((void **)&d_ftd2_x, Nx*sizeof(float)); float *d_ftd2_y; cudaMalloc((void **)&d_ftd2_y, Ny*sizeof(float)); float *d_xc_sin; cudaMalloc((void **)&d_xc_sin, Nx*Ny*sizeof(float)); float *d_xc_cos; cudaMalloc((void 
**)&d_xc_cos, Nx*Ny*sizeof(float)); //temporary buffers float *d_temp_buffer; cudaMalloc((void **)&d_temp_buffer, Nx*Ny*sizeof(float)); //cublas buffer cufftComplex *d_fftR2C_buffer; cudaMalloc((void **)&d_fftR2C_buffer, (Nx / 2 + 1)*Ny*sizeof(cufftComplex)); //cufft R2C buffer as fft result cufftComplex *d_fftC2R_buffer; cudaMalloc((void **)&d_fftC2R_buffer, (Nx / 2 + 1)*Ny*sizeof(cufftComplex)); //as C2R buffer float *d_buffer_x; cudaMalloc((void **)&d_buffer_x, Nx*Ny*sizeof(float)); float *d_buffer_y; cudaMalloc((void **)&d_buffer_y, Nx*Ny*sizeof(float)); float *d_advect_buffer; cudaMalloc((void **)&d_advect_buffer, Nx*Ny*sizeof(float)); float *d_diffuse_buffer; cudaMalloc((void **)&d_diffuse_buffer, Nx*Ny*sizeof(float)); float *d_reaction_buffer; cudaMalloc((void **)&d_reaction_buffer, Nx*Ny*sizeof(float)); float *d_DiffRhoM; cudaMalloc((void **)&d_DiffRhoM, Nx*Ny*sizeof(float)); //the rhoa-dependent rhom diffusion constant float *d_rhoAarea; cudaMalloc((void **)&d_rhoAarea, sizeof(float)); //total active rhoA float *d_phiArea; cudaMalloc((void **)&d_phiArea, sizeof(float)); //total area of phi float *d_rhoMoldArea; cudaMalloc((void **)&d_rhoMoldArea, sizeof(float)); //total of rhoM old float *d_rhoMnewArea; cudaMalloc((void **)&d_rhoMnewArea, sizeof(float)); //total of rhoM new float *d_bendcore; cudaMalloc((void **)&d_bendcore,Nx*Ny*sizeof(float)); //the bending core //fft coefficients fftcoeffs(d_ftd1_x,d_ftd1_y, d_ftd2_x,d_ftd2_y); //handles cufftPlan2d(&plan_R2C, Nx, Ny, CUFFT_R2C); cufftPlan2d(&plan_C2R, Nx, Ny, CUFFT_C2R); cublasCreate(&blashandle); cublasSetPointerMode(blashandle, CUBLAS_POINTER_MODE_DEVICE); //make the cublas return value to device cublasCreate(&h_blashandle); //timer start cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //initialize the global parameters //r0_cell = 6.0f; Initialize(h_phi, h_RhoA, h_RhoM, h_ux, h_uy, h_x, h_y); //copy memory cudaMemcpy(d_x, h_x, Nx*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_phi_old, h_phi, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_phi_new, h_phi, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_RhoAold, h_RhoA, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_RhoMold, h_RhoM, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_RhoAnew, h_RhoA, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_RhoMnew, h_RhoM, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_ux_old, h_ux, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_uy_old, h_uy, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice); //area size volume float A0; cublasSasum(h_blashandle, Nx*Ny, d_phi_old, 1, &A0); A0 = A0 * dx * dy; //error parameters float max_error; int iter_steps; int *idx_maxerr_ux; cudaMalloc((void **)& idx_maxerr_ux, sizeof(int)); int *idx_max_ux; cudaMalloc((void **)&idx_max_ux, sizeof(int)); int *idx_maxerr_uy; cudaMalloc((void **)& idx_maxerr_uy, sizeof(int)); int *idx_max_uy; cudaMalloc((void **)&idx_max_uy, sizeof(int)); float *d_max_error; cudaMalloc((void **)&d_max_error, sizeof(float)); //mass center grad_fft(d_dphix, d_dphiy, d_phi_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); absarray <<<blocks, threadsperblock >>>(d_phi_absgrad, d_dphix, d_dphiy, Nx, Ny); lap_fft(d_phi_lap, d_phi_old, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); for (int steps = 0; steps<(int)(max_time / dt); steps++){ //curv <<<blocks, 
threadsperblock >>>(d_phi_curv, d_dphix, d_dphiy, d_phi_absgrad, Nx, Ny, dx,dy); curv <<<blocks, threadsperblock >>>(d_phi_curv, d_phi_old, d_phi_absgrad, Nx, Ny, dx,dy); //solve phi phievolve <<<blocks, threadsperblock >>>(d_phi_new, d_phi_old, d_dphix, d_dphiy, d_phi_absgrad, d_phi_lap, d_phi_curv, d_ux_old, d_uy_old, Nx, Ny, dt); //update derivatives grad_fft(d_dphix, d_dphiy, d_phi_new, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); absarray <<<blocks, threadsperblock >>>(d_phi_absgrad, d_dphix, d_dphiy, Nx, Ny); lap_fft(d_phi_lap, d_phi_new, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //update rhoA and rhoM //rhoA advection //div_advect_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, d_phi_old, d_RhoAold, d_ux_old, d_uy_old, Nx, Ny); //div_fft(d_advect_buffer, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //rhoA diffusion //div_pst_fd<<<blocks, threadsperblock >>>(d_diffuse_buffer, d_phi_old, d_RhoAold, h_diffRhoA, dx, dy, Nx, Ny); //rhoA reaction cublasSasum(blashandle, Nx * Ny, d_phi_old, 1, d_phiArea); //matrix_product<<<blocks, threadsperblock >>>(d_temp_buffer, d_RhoAold, d_phi_old, 1.0f, Nx, Ny); //cublasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoAarea); //reactionRhoA<<<blocks, threadsperblock >>>(d_reaction_buffer, d_phi_old, d_RhoAold, d_rhoAarea, d_phiArea, h_RhoAtot, h_kbRhoA, h_K2aRhoA, h_kaRhoA, h_kcRhoA, dx, dy, Nx, Ny); //time evolve //RD_evolve<<<blocks, threadsperblock >>>(d_RhoAnew, d_phi_new, d_phi_old, d_RhoAold, d_advect_buffer, d_diffuse_buffer, d_reaction_buffer, dt, lamda, Nx, Ny); rhoa_right<<<blocks, threadsperblock>>>(d_RhoAold, d_RhoMold, d_phi_old, h_xsin, h_xcos, d_x, Lx, Nx, Ny); rhoa_right<<<blocks, threadsperblock>>>(d_RhoAnew, d_RhoMnew, d_phi_new, h_xsin, h_xcos, d_x, Lx, Nx, Ny); //rhoM advection //div_advect_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, d_phi_old, d_RhoMold, d_ux_old, d_uy_old, Nx, Ny); //div_fft(d_advect_buffer, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //rhoM diffusion //diffRhoM<<<blocks, threadsperblock >>>(d_DiffRhoM, d_phi_old, d_RhoAold, h_diffRhoM0, h_KdRhoM, Nx, Ny); //div_pst_fd<<<blocks, threadsperblock >>>(d_diffuse_buffer, d_DiffRhoM, d_RhoMold, 1.0f, dx, dy, Nx, Ny); //no rhoM reaction //RD_evolve<<<blocks, threadsperblock >>>(d_RhoMnew, d_phi_new, d_phi_old, d_RhoMold, d_advect_buffer, d_diffuse_buffer, dt, lamda, Nx, Ny); //normRhoM rhoM //matrix_product<<<blocks, threadsperblock>>>(d_temp_buffer, d_RhoMold, d_phi_old, dx * dy, Nx, Ny); //cublasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoMoldArea); //matrix_product<<<blocks, threadsperblock>>>(d_temp_buffer, d_RhoMnew, d_phi_new, dx * dy, Nx, Ny); //cublasSasum(blashandle, Nx * Ny, d_temp_buffer, 1, d_rhoMnewArea); //normRhoM<<<blocks, threadsperblock>>>(d_RhoMnew, d_rhoMnewArea, d_rhoMoldArea, Nx, Ny); //only rhoA at the right //update forces bend_core<<<blocks, threadsperblock>>>(d_bendcore, d_phi_lap, d_phi_new, Nx, Ny); lap_fft(d_bendcore, d_bendcore, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); potential_force<<<blocks, threadsperblock >>>(d_ptl_force, d_phi_new, d_phi_lap, d_bendcore, d_phiArea, A0, dx, dy, Nx, Ny); //active force activeForce<<<blocks, threadsperblock>>>(d_div_x, d_div_y, 1, d_phi_new, h_etaRhoA, h_etaRhoM, d_RhoAnew, d_RhoMnew, d_dphix, d_dphiy, Nx, Ny); div_fft(d_ActForceX, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); activeForce<<<blocks, 
threadsperblock>>>(d_div_x, d_div_y, 2, d_phi_new, h_etaRhoA, h_etaRhoM, d_RhoAnew, d_RhoMnew, d_dphix, d_dphiy, Nx, Ny); div_fft(d_ActForceY, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); max_error = 10.0f; iter_steps = 0; while(max_error > poisson_err_limit && iter_steps < poisson_max_steps){ //grad(ux) grad_fft(d_duxdx, d_duxdy, d_ux_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //grad(uy) grad_fft(d_duydx, d_duydy, d_uy_old, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //grad(uz) //vel_div_x div_vel_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, 1, d_phi_new, nu_vis, d_duxdx, d_duxdy, d_duydx, d_duydy, Nx, Ny); div_fft(d_buffer_x, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //vel_div_y div_vel_pre<<<blocks, threadsperblock >>>(d_div_x, d_div_y, 2, d_phi_new, nu_vis, d_duxdx, d_duxdy, d_duydx, d_duydy, Nx, Ny); div_fft(d_buffer_y, d_div_x, d_div_y, d_ftd1_x, d_ftd1_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); rhs_poisson<<<blocks, threadsperblock >>>(d_rhs_ux, d_rhs_uy, d_ptl_force, d_phi_new, d_dphix, d_dphiy, d_buffer_x, d_buffer_y, d_ActForceX, d_ActForceY, d_RhoAnew, d_RhoMnew, d_ux_old, d_uy_old, Nx, Ny); poisson_sol(d_ux_new, d_rhs_ux, xi_fric, nu_vis*exceed_val, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); poisson_sol(d_uy_new, d_rhs_uy, xi_fric, nu_vis*exceed_val, d_ftd2_x, d_ftd2_y, d_fftR2C_buffer, d_fftC2R_buffer, Nx, Ny); //modify ux uy uz in regions of phi < 0.0001 not done //get error minus_matrix<<<blocks, threadsperblock >>>(d_buffer_x, d_ux_new, d_ux_old, Nx, Ny); minus_matrix<<<blocks, threadsperblock >>>(d_buffer_y, d_uy_new, d_uy_old, Nx, Ny); cublasIsamax(blashandle, Nx*Ny, d_buffer_x, 1, idx_maxerr_ux); cublasIsamax(blashandle, Nx*Ny, d_ux_new, 1, idx_max_ux); cublasIsamax(blashandle, Nx*Ny, d_buffer_y, 1, idx_maxerr_uy); cublasIsamax(blashandle, Nx*Ny, d_uy_new, 1, idx_max_uy); get_error<<<1, 1>>>(d_max_error, d_ux_new, d_ux_old, d_uy_new, d_uy_old, idx_maxerr_ux, idx_max_ux, idx_maxerr_uy, idx_max_uy, Nx, Ny); cudaMemcpy(&max_error, d_max_error, sizeof(float), cudaMemcpyDeviceToHost); iter_steps++; cublasScopy(blashandle, Nx*Ny, d_ux_new, 1, d_ux_old, 1); cublasScopy(blashandle, Nx*Ny, d_uy_new, 1, d_uy_old, 1); } if(max_error > poisson_err_limit && steps > 500){ printf("step %f wrong err %f\n", steps * dt, max_error); break; } //record center if(steps % 500 == 0){ xdir_center<<<blocks, threadsperblock>>>(d_xc_sin, d_xc_cos, d_phi_new, d_phiArea, d_x, Lx, Nx, Ny); thrust::device_ptr<float> d_sin = thrust::device_pointer_cast(d_xc_sin); h_xsin = thrust::reduce(d_sin, d_sin + Nx*Ny); thrust::device_ptr<float> d_cos = thrust::device_pointer_cast(d_xc_cos); h_xcos = thrust::reduce(d_cos, d_cos + Nx*Ny); fprintf(ft, "%f %f\n", steps*dt, atan2(h_xsin, h_xcos)/M_PI*Lx); } if(steps % (int)(record_itvl/dt)==0 && steps > 0){ char phi_name[50];sprintf(phi_name,"phi_profile_%d.txt", file_record); cudaMemcpy(h_phi, d_phi_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(phi_name, h_phi, Nx, Ny); char rhoA_name[50];sprintf(rhoA_name,"rhoA_profile_%d.txt",file_record); cudaMemcpy(h_RhoA, d_RhoAold, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(rhoA_name, h_RhoA, Nx, Ny); char rhoM_name[50];sprintf(rhoM_name,"rhoM_profile_%d.txt",file_record); cudaMemcpy(h_RhoM, d_RhoMold, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(rhoM_name, h_RhoM, Nx, Ny); char 
ux_name[50];sprintf(ux_name,"ux_profile_%d.txt",file_record); cudaMemcpy(h_ux, d_ux_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(ux_name, h_ux, Nx, Ny); char uy_name[50];sprintf(uy_name,"uy_profile_%d.txt",file_record); cudaMemcpy(h_uy, d_uy_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(uy_name, h_uy, Nx, Ny); file_record++; } //swap old and new cublasScopy(blashandle, Nx*Ny, d_phi_new, 1, d_phi_old, 1); cublasScopy(blashandle, Nx*Ny, d_RhoAnew, 1, d_RhoAold, 1); cublasScopy(blashandle, Nx*Ny, d_RhoMnew, 1, d_RhoMold, 1); } //final record of the shape char phi_name[50]; sprintf(phi_name,"phi_profile.txt", file_record); cudaMemcpy(h_phi, d_phi_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(phi_name, h_phi, Nx, Ny); char rhoA_name[50]; sprintf(rhoA_name,"rhoA_profile.txt",file_record); cudaMemcpy(h_RhoA, d_RhoAold, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(rhoA_name, h_RhoA, Nx, Ny); char rhoM_name[50]; sprintf(rhoM_name,"rhoM_profile.txt",file_record); cudaMemcpy(h_RhoM, d_RhoMold, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(rhoM_name, h_RhoM, Nx, Ny); char ux_name[50];sprintf(ux_name,"ux_profile.txt",file_record); cudaMemcpy(h_ux, d_ux_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(ux_name, h_ux, Nx, Ny); char uy_name[50];sprintf(uy_name,"uy_profile.txt",file_record); cudaMemcpy(h_uy, d_uy_old, Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost); writetofile(uy_name, h_uy, Nx, Ny); //destroy plan and handles cufftDestroy(plan_R2C); cufftDestroy(plan_C2R); cublasDestroy(blashandle); cublasDestroy(h_blashandle); fclose(ft); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsetime; cudaEventElapsedTime(&elapsetime, start, stop); std::printf("time need: %f s \n", elapsetime / 1000); return 0; } void Initialize(float* phi, float *rhoA, float *rhoM, float* ux, float* uy, float* x, float* y){ for (int i = 0; i < Nx; i++)x[i] = -Lx + 2 * Lx / (float)Nx*(float)i; for (int i = 0; i < Ny; i++)y[i] = -Ly + 2 * Ly / (float)Ny*(float)i; for (int i = 0; i < Nx; i++){ for (int j = 0; j < Ny; j++){ float dis = sqrt(x[i]*x[i] + y[j]*y[j]); int index = i + j * Nx; phi[index]=0.5f + 0.5f * tanh(3.0f*(r0_cell-dis) / h_epsilon); rhoA[index] = 0.0f; if(x[i] > 0.0f){ rhoA[index] = rhoAinitial * phi[index]; } rhoM[index] = rhoMinitial * phi[index]; ux[index] = 0.0f; uy[index] = 0.0f; } } } void writetofile(const char *name, float *u, int Nx, int Ny){ FILE *fp = fopen(name, "w+"); for (int j = 0; j < Ny; j++){ for (int i = 0; i < Nx; i++){ fprintf(fp, "%f ", u[i + j*Nx]); } fprintf(fp, "\n"); } fclose(fp); } __global__ void phievolve(float *phi_new, float *phi_old, float *grad_x, float *grad_y, float *absdphi, float *lap_phi, float *curv, float *ux, float *uy, int Nx, int Ny, float dt){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float dG = 36.0f * phi_old[index] * (1.0f - phi_old[index]) * (1.0f - 2.0f * phi_old[index]); phi_new[index] = phi_old[index] + dt*(-ux[index] * grad_x[index] - uy[index] * grad_y[index] + d_Gamma*(d_epsilon*lap_phi[index] - dG / d_epsilon + curv[index] * d_epsilon * absdphi[index])); } __global__ void div_vel_pre(float *div_x, float *div_y, int dim, float *phi, float nu, float *duxdx, float *duxdy, float *duydx, float *duydy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; if(dim == 1){ div_x[index] = 2.0f * nu * phi[index] * 
duxdx[index] - nu * d_exceed_val * duxdx[index]; //sigma_xx div_y[index] = nu * phi[index] * (duxdy[index] + duydx[index]) - nu * d_exceed_val * duxdy[index]; //sigma_xy } if(dim == 2){ div_x[index] = nu * phi[index] *(duydx[index] + duxdy[index]) - nu * d_exceed_val * duydx[index]; //sigma_yx div_y[index] = 2.0f * nu * phi[index] * duydy[index] - nu * d_exceed_val * duydy[index]; //sigma_yy } } __global__ void potential_force(float *output, float *phi, float *lap_phi, float *bendcore, float *phiarea, float A0, float dx, float dy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float dGphi = 36.0f * phi[index] * (1.0f - phi[index]) * (1.0f - 2.0f*phi[index]); float ddGphi = 36.0f*(1.0f - 6.0f*phi[index] + 6.0f*phi[index] * phi[index]); float core = lap_phi[index] - dGphi / d_epsilon / d_epsilon; float current_area = *phiarea * dx * dy; float delta_area = 0.0f; if(current_area < d_areaMin) delta_area = current_area - d_areaMin; if(current_area > d_areaMax) delta_area = current_area - d_areaMax; output[index] = -d_tension * d_epsilon * core + d_bending * d_epsilon * (bendcore[index] - ddGphi * core / d_epsilon / d_epsilon) + d_Mv * delta_area; } __global__ void rhs_poisson(float *rhs_x, float *rhs_y, float *ptl_force, float *phi, float *dphix, float *dphiy, float *vis_x, float *vis_y, float *act_x, float *act_y, float *rhoA, float *rhoM, float *ux, float *uy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rhs_x[index] = ptl_force[index] * dphix[index] + act_x[index] + vis_x[index]; rhs_y[index] = ptl_force[index] * dphiy[index] + act_y[index] + vis_y[index]; } __global__ void get_error(float *max_error, float *ux_new, float *ux_old, float *uy_new, float *uy_old, int *idx_eux, int *idx_ux, int *idx_euy, int *idx_uy, int Nx, int Ny){ float err_ux = abs(ux_new[*idx_eux - 1] - ux_old[*idx_eux - 1]) / abs(ux_new[*idx_ux - 1]); float err_uy = abs(uy_new[*idx_euy - 1] - uy_old[*idx_euy - 1]) / abs(uy_new[*idx_uy - 1]); *max_error = MAX(err_ux, err_uy); } __global__ void div_advect_pre(float *div_x, float *div_y, float *dnsty, float *rho, float *ux, float *uy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; //advection on the interface div_x[index] = dnsty[index] * rho[index] * ux[index]; div_y[index] = dnsty[index] * rho[index] * uy[index]; } __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float *react, float dt, float lamda, int Nx, int Ny){ //with reaction terms int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho_new[index] = 0.0f; if(dnsty_old[index] > lamda){ rho_new[index] = (2.0f * dnsty_old[index] - dnsty_new[index]) / dnsty_old[index] * rho_old[index] + dt / dnsty_old[index] *(diffuse[index] - advect[index] + react[index]); } } __global__ void RD_evolve(float *rho_new, float *dnsty_new, float *dnsty_old, float *rho_old, float *advect, float *diffuse, float dt, float lamda, int Nx, int Ny){ //without reaction terms int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho_new[index] = 0.0f; if(dnsty_old[index] > lamda){ rho_new[index] = (2.0f * dnsty_old[index] - dnsty_new[index]) / dnsty_old[index] * rho_old[index] + dt / dnsty_old[index] 
*(diffuse[index] - advect[index]); } } __global__ void normRhoM(float *rho, float *rho_area_new, float *rho_are_old, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; rho[index] = rho[index] * (*rho_are_old) / (*rho_area_new); } __global__ void diffRhoM(float *output, float *phi, float *rhoa, float Dm0, float Kd, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = phi[index] * Dm0 / (1.0f + rhoa[index] / Kd); } __global__ void reactionRhoA(float *output, float *phi, float *rhoA, float *rhoAarea, float *phiarea, float rhoAtot, float kb, float K2a, float ka, float kc, float dx, float dy, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; float rhoAcyt = (rhoAtot - (*rhoAarea) * dx * dy) / ((*phiarea) * dx * dy); output[index] = phi[index] * (kb * (rhoA[index] * rhoA[index] / (K2a * K2a + rhoA[index] * rhoA[index]) + ka) * rhoAcyt - kc * rhoA[index]); } __global__ void bend_core(float *core, float *lap_phi, float *phi, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; if (i < Nx && j < Ny){ core[index]=lap_phi[index]-36.0f*phi[index]*(1.0f-phi[index])*(1.0f-2.0f*phi[index])/d_epsilon/d_epsilon; } } __global__ void activeForce(float *div_x, float *div_y, int dir, float *phi, float etaRhoA, float etaRhoM, float *rhoA, float *rhoM, float *dphix, float *dphiy, int Nx, int Ny){ //active force by rhoA and rhoM int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; //x-dir if(dir == 1){ div_x[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphix[index] + etaRhoM * phi[index] * (1.0f - rhoA[index]); div_y[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphiy[index]; } //y-dir if(dir == 2){ div_x[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphix[index] * dphiy[index]; div_y[index] = -etaRhoA * d_epsilon * phi[index] * rhoA[index] * dphiy[index] * dphiy[index] + etaRhoM * phi[index] * (1.0f - rhoA[index]); } } /*--------------------------------------------------------------------------------------*/ /*Utility functions*/ /*--------------------------------------------------------------------------------------*/ __global__ void matrix_product(float *output, float *input1, float *input2, float alpha, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = alpha * input1[index] * input2[index]; } __global__ void add3matrix(float *output, float *input1, float *input2, float *input3){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = input1[index] + input2[index] + input3[index]; } __global__ void add2matrix(float *output, float *input1, float *input2){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = input1[index] + input2[index]; } __global__ void absarray(float *absay, float *ax, float *ay, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*Nx; absay[index] = sqrt(ax[index] * ax[index] + ay[index] * ay[index]); } __global__ void 
minus_matrix(float *output, float *minuend, float *substractor, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; output[index] = minuend[index] - substractor[index]; } __global__ void xdir_center(float *xc_sin, float *xc_cos, float *phi, float *phi_area, float *x, float Lx, int m, int n){ //return the matrix of phi*sin(x*pi/Lx), phi*cos(x*phi/Lx) int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*blockDim.x*gridDim.x; if(i< m && j<n){ xc_sin[index] = phi[index] / (*phi_area) * sin(x[i] * M_PI / Lx); xc_cos[index] = phi[index] / (*phi_area) * cos(x[i] * M_PI / Lx); } } __global__ void rhoa_right(float *rhoa, float *rhom, float *phi, float x_sin, float x_cos, float *x, float Lx, int m, int n){ //put rhoa at the right half plane of mass centern int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*m; if( i < m && j < n){ rhoa[index] = 0.0f; rhom[index] = 1.0f; if(sin(x[i]*M_PI/Lx) * x_cos - cos(x[i]*M_PI/Lx)*x_sin > -0.5f && phi[index] > 1e-4){ rhoa[index] = 1.0f; rhom[index] = 0.0f; } } } /*----------------------------------------------------------------------------------------------*/ /*Don't Change The Code After This Line if You Don't Want to Change the Differentiation Methods*/ /*----------------------------------------------------------------------------------------------*/ __global__ void curv(float *curv, float *phi_gradx, float *phi_grady, float *absdphi, int Nx, int Ny, float dx, float dy){ //from gradient obtained by fft int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*Nx; //f: forward; b:backward int fdx = i + 1; if (i == Nx - 1) fdx = 0; fdx = fdx + j*Nx; int bdx = i - 1; if (i == 0) bdx = Nx - 1; bdx = bdx + j*Nx; int fdy = j + 1; if (j == Ny - 1) fdy = 0; fdy = i + fdy*Nx; int bdy = j - 1; if (j == 0) bdy = Ny - 1; bdy = i + bdy*Nx; curv[index] = 0.0f; if (absdphi[index] >= 0.01 && absdphi[fdx] >= 0.01 && absdphi[bdx] >= 0.01 && absdphi[fdy] >= 0.01 && absdphi[bdy] >= 0.01) curv[index] = -(phi_gradx[fdx] / absdphi[fdx] - phi_gradx[bdx] / absdphi[bdx]) / 2.0f / dx - (phi_grady[fdy] / absdphi[fdy] - phi_grady[bdy] / absdphi[bdy]) / 2.0f / dy; } __global__ void curv(float *curv, float *phi, float *absdphi, int m, int n, float dx, float dy){ //from gradient obtained by fft int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*m; if(i<m && j<n){ curv[index] = 0.0f; if(absdphi[index]>0.001f){ int r_dx = i + 1; if (i == m - 1) r_dx = 0; int l_dx = i - 1; if (i == 0) l_dx = m - 1; int u_dy = j + 1; if (j == n - 1) u_dy = 0; int d_dy = j - 1; if (j == 0) d_dy = n - 1; //geth the 9 points needed for curvature calculation float phi_i_j=phi[index]; //(i,j) float phi_ip1_j=phi[r_dx+j*m]; //(i+1,j) float phi_im1_j=phi[l_dx+j*m]; //(i-1,j) float phi_i_jp1=phi[i+u_dy*m]; //(i,j+1) float phi_i_jm1=phi[i+d_dy*m]; //(i,j-1) float phi_ip1_jp1=phi[r_dx+u_dy*m]; //(i+1,j+1) float phi_ip1_jm1=phi[r_dx+d_dy*m]; //(i+1,j-1) float phi_im1_jp1=phi[l_dx+u_dy*m]; //(i-1,j+1) float phi_im1_jm1=phi[l_dx+d_dy*m]; //(i-1,j-1) float phix_iphalf_j = (phi_ip1_j - phi_i_j )/dx; float phix_imhalf_j = (phi_i_j - phi_im1_j)/dx; float phiy_i_jphalf = (phi_i_jp1 - phi_i_j )/dy; float phiy_i_jmhalf = (phi_i_j - phi_i_jm1)/dy; float phiy_iphalf_j = (phi_ip1_jp1 + phi_i_jp1 - phi_ip1_jm1 - phi_i_jm1 )/(4.0f*dy); float 
phiy_imhalf_j = (phi_i_jp1 + phi_im1_jp1 - phi_i_jm1 - phi_im1_jm1)/(4.0f*dy); float phix_i_jphalf = (phi_ip1_jp1 + phi_ip1_j - phi_im1_jp1 - phi_im1_j )/(4.0f*dx); float phix_i_jmhalf = (phi_ip1_j + phi_ip1_jm1 - phi_im1_j - phi_im1_jm1)/(4.0f*dx); float grad_phi_abs_iphalf_j = sqrt( phix_iphalf_j * phix_iphalf_j + phiy_iphalf_j * phiy_iphalf_j ); float grad_phi_abs_imhalf_j = sqrt( phix_imhalf_j * phix_imhalf_j + phiy_imhalf_j * phiy_imhalf_j); float grad_phi_abs_i_jphalf = sqrt( phix_i_jphalf * phix_i_jphalf + phiy_i_jphalf * phiy_i_jphalf ); float grad_phi_abs_i_jmhalf = sqrt( phix_i_jmhalf * phix_i_jmhalf + phiy_i_jmhalf * phiy_i_jmhalf ); curv[index] = - ( phix_iphalf_j / grad_phi_abs_iphalf_j - phix_imhalf_j / grad_phi_abs_imhalf_j )/dx - ( phiy_i_jphalf / grad_phi_abs_i_jphalf - phiy_i_jmhalf / grad_phi_abs_i_jmhalf )/dy; } } } void fftcoeffs(float *d_d1x, float *d_d1y, float *d_d2x, float *d_d2y){ //FFT coefficients, d_d1x, d_d1y are device 1-st derivatie grid matrixes, d_d2x, d_d2y are device 2-nd derivative matrix float *h_kx2 = (float *)malloc(Nx*sizeof(float)); for (int i = 0; i <= Nx / 2; i++) h_kx2[i] = (float)i * M_PI / Lx; for (int i = Nx / 2 + 1; i < Nx; i++) h_kx2[i] = ((float)i - (float)Nx) * M_PI / Lx; cudaMemcpy(d_d2x, h_kx2, Nx*sizeof(float), cudaMemcpyHostToDevice); float *h_ky2 = (float *)malloc(Ny*sizeof(float)); for (int i = 0; i <= Ny / 2; i++) h_ky2[i] = (float)i * M_PI / Ly; for (int i = Ny / 2 + 1; i < Ny; i++) h_ky2[i] = ((float)i - (float)Ny) * M_PI / Ly; cudaMemcpy(d_d2y, h_ky2, Ny*sizeof(float), cudaMemcpyHostToDevice); float *h_kx1 = (float *)malloc(Nx*sizeof(float)); for (int i = 0; i < Nx / 2; i++) h_kx1[i] = (float)i * M_PI / Lx; h_kx1[Nx / 2] = 0.0f; for (int i = Nx / 2 + 1; i < Nx; i++) h_kx1[i] = ((float)i - (float)Nx) * M_PI / Lx; cudaMemcpy(d_d1x, h_kx1, Nx*sizeof(float), cudaMemcpyHostToDevice); float *h_ky1 = (float *)malloc(Ny*sizeof(float)); for (int i = 0; i < Ny / 2; i++)h_ky1[i] = (float)i * M_PI / Ly; h_ky1[Ny / 2] = 0.0f; for (int i = Ny / 2 + 1; i < Ny; i++) h_ky1[i] = ((float)i - (float)Ny) * M_PI / Ly; cudaMemcpy(d_d1y, h_ky1, Ny*sizeof(float), cudaMemcpyHostToDevice); free(h_kx1); free(h_kx2); free(h_ky1); free(h_ky2); } __global__ void fftR2C_deriv1(cufftComplex *input, cufftComplex *output, int dim, float *coeff, int Nx, int Ny){ //x:dim=1; y:dim=2; int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*(Nx / 2 + 1); int dir[2]={i,j}; if (i < (Nx / 2 + 1) && j < Ny){ cufftComplex temp = input[index]; output[index].x = -temp.y*coeff[dir[dim-1]] / (float)Nx / (float)Ny; output[index].y = temp.x*coeff[dir[dim-1]] / (float)Nx / (float)Ny; } } __global__ void fftR2C_lap(cufftComplex *input, cufftComplex *output, float *coeff_x, float *coeff_y, int Nx, int Ny){ int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j*(Nx / 2 + 1); if (i < (Nx / 2 + 1) && j < Ny){ cufftComplex temp = input[index]; float coeff = coeff_x[i] * coeff_x[i] + coeff_y[j] * coeff_y[j]; output[index].x = -temp.x*coeff / (float)Nx / (float)Ny; output[index].y = -temp.y*coeff / (float)Nx / (float)Ny; } } void grad_fft(float *output_x, float *output_y, float *input, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ cufftExecR2C(plan_R2C, input, R2C_buffer); fftR2C_deriv1 <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer,1, deriv_vec_x, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, output_x); fftR2C_deriv1 
<<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer,2, deriv_vec_y, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, output_y); } void lap_fft(float *output, float *input, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ cufftExecR2C(plan_R2C, input, R2C_buffer); fftR2C_lap <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer, deriv_vec_x,deriv_vec_y, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, output); } void div_fft(float *output, float *input_x, float *input_y, float *deriv_vec_x, float *deriv_vec_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ //in-space transform, notice that input_x and input_y will change values when function is called cufftExecR2C(plan_R2C, input_x, R2C_buffer); fftR2C_deriv1 <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer, 1, deriv_vec_x, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, input_x); cufftExecR2C(plan_R2C, input_y, R2C_buffer); fftR2C_deriv1 <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer, 2, deriv_vec_y, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, input_y); add2matrix<<<blocks, threadsperblock>>>(output, input_x, input_y); } void div_pst_fft(float *output, float *c_pst, float *rho, float coeff_diff, float *deriv_vec_x2, float *deriv_vec_y2, float *buffer_x, float *buffer_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ //notice this differential process uses the 2-nd kind of derivative vector, corresponding to 2-nd order derivative grad_fft(buffer_x, buffer_y, rho, deriv_vec_x2, deriv_vec_y2, R2C_buffer, C2R_buffer, Nx, Ny); matrix_product<<<blocks, threadsperblock >>>(buffer_x, buffer_x, c_pst, coeff_diff, Nx, Ny); matrix_product<<<blocks, threadsperblock >>>(buffer_y, buffer_y, c_pst, coeff_diff, Nx, Ny); div_fft(output, buffer_x, buffer_y, deriv_vec_x2, deriv_vec_y2, R2C_buffer, C2R_buffer, Nx, Ny); } __global__ void div_pst_fd(float *output, float *c_pst, float *rho, float coeff_diff, float dx, float dy, int Nx, int Ny){ //finite differential of the diffusion term int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index = i + j * Nx; int r_dx = i + 1; if (i == Nx - 1) r_dx = 0; r_dx = r_dx + j * Nx; int l_dx = i - 1; if (i == 0) l_dx = Nx - 1; l_dx = l_dx + j * Nx; int u_dy = j + 1; if (j == Ny - 1) u_dy = 0; u_dy = i + u_dy * Nx; int d_dy = j - 1; if (j == 0) d_dy = Ny - 1; d_dy = i + d_dy * Nx; output[index] = coeff_diff * ((c_pst[r_dx] + c_pst[index]) / 2.0f * (rho[r_dx] - rho[index]) / dx - (c_pst[l_dx] + c_pst[index]) / 2.0f * (rho[index] - rho[l_dx]) / dx) / dx + coeff_diff * ((c_pst[u_dy] + c_pst[index]) / 2.0f * (rho[u_dy] - rho[index]) / dy - (c_pst[d_dy] + c_pst[index]) / 2.0f * (rho[index] - rho[d_dy]) / dy) / dy; } void par_deriv1_fft(float *output, float *input, int dim, float *deriv_vec, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ cufftExecR2C(plan_R2C, input, R2C_buffer); if(dim == 1){ fftR2C_deriv1 <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer,1, deriv_vec, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, output); } if(dim == 2){ fftR2C_deriv1<<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer,2, deriv_vec, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, output); } } __global__ void poissonR2C(cufftComplex *input, cufftComplex *output, float *coeff_x, float *coeff_y, float coeff0, float coeff2, int Nx, int Ny){ //equation of coeff0*u - coeff2*Delta(u) = f int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; int index 
= i + j*(Nx / 2 + 1); if (i < (Nx / 2 + 1) && j < Ny){ float coeff = coeff_x[i] * coeff_x[i] + coeff_y[j] * coeff_y[j]; cufftComplex temp = input[index]; output[index].x = temp.x / (coeff0 + coeff2 * coeff) / (float)Nx / (float)Ny; output[index].y = temp.y / (coeff0 + coeff2 * coeff) / (float)Nx / (float)Ny; } } void poisson_sol(float *sol, float *rhs, float coeff0, float coeff2, float *coeff_x, float *coeff_y, cufftComplex *R2C_buffer, cufftComplex *C2R_buffer, int Nx, int Ny){ cufftExecR2C(plan_R2C, rhs, R2C_buffer); poissonR2C <<<blocks, threadsperblock >>>(R2C_buffer, C2R_buffer, coeff_x, coeff_y, coeff0, coeff2, Nx, Ny); cufftExecC2R(plan_C2R, C2R_buffer, sol); } /*----------------------------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------------------------*/
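/* Editorial note: the sketch below is not part of the original source.  It is a minimal
   usage example showing how the spectral helpers defined above compose: given a phase
   field d_phi, it fills d_absgrad with |grad(phi)|.  It assumes the cuFFT plans
   (plan_R2C / plan_C2R), the launch configuration (blocks, threadsperblock) and the
   first-derivative wave-number vectors were created exactly as in main(); the function
   name and the argument names are illustrative only. */
void example_absgrad(float *d_absgrad, float *d_phi,
                     float *d_ftd1_x, float *d_ftd1_y,
                     float *d_gx, float *d_gy,
                     cufftComplex *d_r2c, cufftComplex *d_c2r, int Nx, int Ny)
{
    // d(phi)/dx and d(phi)/dy via one forward R2C transform and two inverse C2R transforms
    grad_fft(d_gx, d_gy, d_phi, d_ftd1_x, d_ftd1_y, d_r2c, d_c2r, Nx, Ny);
    // |grad(phi)| = sqrt(gx^2 + gy^2), one thread per grid point
    absarray<<<blocks, threadsperblock>>>(d_absgrad, d_gx, d_gy, Nx, Ny);
}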
2667c177918ebe602dcdbf53dbb4c900538da7e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include "bmp/EasyBMP.h"
#include <stdio.h>
#include <time.h>

using namespace std;

#define WINDOW_SIZE 3
#define COUNT_POINTS 9

#define CUDA_CHECK_ERROR(err) \
if (err != hipSuccess) { \
printf("Cuda error: %s\n", hipGetErrorString(err)); \
printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \
system("pause");\
}

// declare the texture reference for a 2D texture of float
texture<float, hipTextureType2D, hipReadModeElementType> tex;

__global__ void medianFilter(float *output, int imageWidth, int imageHeight)
{
    // pick the row and column handled by this thread
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // mask over which the median will be found
    float mask[COUNT_POINTS] = { 0,0,0,0,0,0,0,0,0 };
    int k = 0;

    // Since textures handle out-of-bounds accesses, walk from -1 to 1 with step 1
    // over the image, filling the mask
    for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
            mask[k] = tex2D(tex, col + j, row + i);
            k++;
        }
    }

    // sort the values in the mask
    for (int i = 1; i < COUNT_POINTS; i++) {
        for (int j = i; j > 0 && mask[j - 1] > mask[j]; j--) {
            int tmp = mask[j - 1];
            mask[j - 1] = mask[j];
            mask[j] = tmp;
        }
    }

    // write the median to the resulting central pixel
    output[row * imageWidth + col] = mask[4];
}

float *readImage(char *filePathInput, unsigned int *rows, unsigned int *cols)
{
    BMP Image;
    Image.ReadFromFile(filePathInput);
    *rows = Image.TellHeight();
    *cols = Image.TellWidth();

    float *imageAsArray = (float *)calloc(*rows * *cols, sizeof(float));

    // convert the image to grayscale
    for (int i = 0; i < Image.TellWidth(); i++) {
        for (int j = 0; j < Image.TellHeight(); j++) {
            double Temp = 0.30*(Image(i, j)->Red) + 0.59*(Image(i, j)->Green) + 0.11*(Image(i, j)->Blue);
            Image(i, j)->Red = (unsigned char)Temp;
            Image(i, j)->Green = (unsigned char)Temp;
            Image(i, j)->Blue = (unsigned char)Temp;
            imageAsArray[j * *cols + i] = Temp;
        }
    }
    return imageAsArray;
}

BMP readImageForCPU(char *filePathInput)
{
    BMP Image;
    Image.ReadFromFile(filePathInput);

    // convert the image to grayscale
    for (int i = 0; i < Image.TellWidth(); i++) {
        for (int j = 0; j < Image.TellHeight(); j++) {
            double Temp = 0.30*(Image(i, j)->Red) + 0.59*(Image(i, j)->Green) + 0.11*(Image(i, j)->Blue);
            Image(i, j)->Red = (unsigned char)Temp;
            Image(i, j)->Green = (unsigned char)Temp;
            Image(i, j)->Blue = (unsigned char)Temp;
        }
    }
    Image.SetBitDepth(8);
    CreateGrayscaleColorTable(Image);
    return Image;
}

void writeImage(char *filePath, float *grayscale, unsigned int rows, unsigned int cols)
{
    BMP Output;
    Output.SetSize(cols, rows);

    // write the image out pixel by pixel
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            RGBApixel pixel;
            pixel.Red = grayscale[i * cols + j];
            pixel.Green = grayscale[i * cols + j];
            pixel.Blue = grayscale[i * cols + j];
            pixel.Alpha = 0;
            Output.SetPixel(j, i, pixel);
        }
    }
    Output.WriteToFile(filePath);
}

BMP MedianFilterCPU(BMP image)
{
    float mask[COUNT_POINTS] = { 0,0,0,0,0,0,0,0,0 };
    BMP output;
    output.SetSize(image.TellHeight(), image.TellWidth());

    for (int row = 0; row < image.TellWidth(); row++) {
        for (int col = 0; col < image.TellHeight(); col++) {
            // fill the border pixels with 0
            if ((row == 0) || (col == 0) || (row == image.TellHeight() - 1) || (col == image.TellWidth() - 1)) {
                RGBApixel pixel;
                pixel.Red = 0;
                pixel.Green = 0;
                pixel.Blue = 0;
                output.SetPixel(col, row, pixel);
            }
            else {
                for (int x = 0; x < WINDOW_SIZE; x++) {
                    for (int y = 0; y < WINDOW_SIZE; y++) {
                        mask[x*WINDOW_SIZE + y] = image.GetPixel((col + y - 1), (row + x - 1)).Red;
                    }
                }
                // sort the values in the mask
                for (int i = 1; i < COUNT_POINTS; i++) {
                    for (int j = i; j > 0 && mask[j - 1] > mask[j]; j--) {
                        int tmp = mask[j - 1];
                        mask[j - 1] = mask[j];
                        mask[j] = tmp;
                    }
                }
                RGBApixel pixel;
                pixel.Red = mask[4];
                pixel.Green = mask[4];
                pixel.Blue = mask[4];
                output.SetPixel(col, row, pixel);
            }
        }
    }
    return output;
}

int main()
{
    setlocale(LC_ALL, "RUS");
    unsigned int rows, cols;

    // read in the image
    float * imageAsArray = readImage ("lena.bmp", &rows, &cols);
    std::cout << "Size: " << rows << "x" << cols << std::endl;

    BMP imgCPU = readImageForCPU("lena.bmp");

    clock_t start_time = clock();
    BMP outCPU = MedianFilterCPU(imgCPU);
    clock_t end_time = clock();
    std::cout << "CPU time = " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " ms" << std::endl;
    outCPU.WriteToFile("resultCPU.bmp");

    // create a channel descriptor with the Float format
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
    hipArray *cuArray;

    // allocate an array on the device according to the hipChannelFormatDesc structure and return its handle in cuArray
    CUDA_CHECK_ERROR(hipMallocArray(&cuArray, &channelDesc, cols, rows));

    // copy imageAsArray into cuArray
    CUDA_CHECK_ERROR(hipMemcpyToArray(cuArray, 0, 0, imageAsArray, rows * cols * sizeof(float), hipMemcpyHostToDevice));

    // set the texture parameters
    tex.addressMode[0] = hipAddressModeClamp;
    tex.addressMode[1] = hipAddressModeClamp;
    tex.filterMode = hipFilterModePoint;

    // bind the array to the texture
    CUDA_CHECK_ERROR(hipBindTextureToArray(tex, cuArray, channelDesc));

    float *dev_output, *output;
    float gpuTime = 0;

    output = (float *)calloc(rows * cols, sizeof(float));
    CUDA_CHECK_ERROR(hipMalloc(&dev_output, rows * cols * sizeof(float)));

    dim3 dimBlock(16, 16);
    dim3 dimGrid((cols + dimBlock.x - 1) / dimBlock.x, (rows + dimBlock.y - 1) / dimBlock.y);

    hipEvent_t start;
    hipEvent_t stop;

    // create events to synchronize and to time the GPU work
    CUDA_CHECK_ERROR(hipEventCreate(&start));
    CUDA_CHECK_ERROR(hipEventCreate(&stop));

    // mark the start of the GPU computation
    hipEventRecord(start, 0);

    medianFilter << <dimGrid, dimBlock >> > (dev_output, cols, rows);

    // copy the result from the device back to the host into output
    CUDA_CHECK_ERROR(hipMemcpy(output, dev_output, rows * cols * sizeof(float), hipMemcpyDeviceToHost));

    // mark the end of the computation
    hipEventRecord(stop, 0);

    // synchronize with the end of the computation
    hipEventSynchronize(stop);

    // compute the GPU running time
    hipEventElapsedTime(&gpuTime, start, stop);
    std::cout << "GPU time = " << gpuTime << " ms" << std::endl;

    writeImage("result.bmp", output, rows, cols);

    // free the resources on the GPU
    CUDA_CHECK_ERROR(hipFreeArray(cuArray));
    CUDA_CHECK_ERROR(hipFree(dev_output));

    system("pause");
    return 0;
}
2667c177918ebe602dcdbf53dbb4c900538da7e2.cu
#include "cuda_runtime.h" #include <curand_kernel.h> #include <device_functions.h> #include "device_launch_parameters.h" #include <cstdlib> #include <cuda.h> #include <stdio.h> #include <iostream> #include <stdlib.h> #include "bmp/EasyBMP.h" #include <stdio.h> #include <time.h> using namespace std; #define WINDOW_SIZE 3 #define COUNT_POINTS 9 #define CUDA_CHECK_ERROR(err) \ if (err != cudaSuccess) { \ printf("Cuda error: %s\n", cudaGetErrorString(err)); \ printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \ system("pause");\ } // объявляем ссылку на текстуру для двумерной текстуры float texture<float, cudaTextureType2D, cudaReadModeElementType> tex; __global__ void medianFilter(float *output, int imageWidth, int imageHeight) { // выбрали строку и столбец для потока int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // маска по которой будет находиться медиана float mask[COUNT_POINTS] = { 0,0,0,0,0,0,0,0,0 }; int k = 0; // Т.к текстуры обладают свойством свертывание - т.е выход за границы, будем идти от -1 до 1 с шагом 1 по картинки, //заполняя маску for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { mask[k] = tex2D(tex, col + j, row + i); k++; } } // Отсортировали значения в маске for (int i = 1; i < COUNT_POINTS; i++) { for (int j = i; j > 0 && mask[j - 1] > mask[j]; j--) { int tmp = mask[j - 1]; mask[j - 1] = mask[j]; mask[j] = tmp; } } // в результирующий центральный пиксель записали медиану output[row * imageWidth + col] = mask[4]; } float *readImage(char *filePathInput, unsigned int *rows, unsigned int *cols) { BMP Image; Image.ReadFromFile(filePathInput); *rows = Image.TellHeight(); *cols = Image.TellWidth(); float *imageAsArray = (float *)calloc(*rows * *cols, sizeof(float)); // Преобразуем картику в черно-белую for (int i = 0; i < Image.TellWidth(); i++) { for (int j = 0; j < Image.TellHeight(); j++) { double Temp = 0.30*(Image(i, j)->Red) + 0.59*(Image(i, j)->Green) + 0.11*(Image(i, j)->Blue); Image(i, j)->Red = (unsigned char)Temp; Image(i, j)->Green = (unsigned char)Temp; Image(i, j)->Blue = (unsigned char)Temp; imageAsArray[j * *cols + i] = Temp; } } return imageAsArray; } BMP readImageForCPU(char *filePathInput) { BMP Image; Image.ReadFromFile(filePathInput); // Преобразуем картику в черно-белую for (int i = 0; i < Image.TellWidth(); i++) { for (int j = 0; j < Image.TellHeight(); j++) { double Temp = 0.30*(Image(i, j)->Red) + 0.59*(Image(i, j)->Green) + 0.11*(Image(i, j)->Blue); Image(i, j)->Red = (unsigned char)Temp; Image(i, j)->Green = (unsigned char)Temp; Image(i, j)->Blue = (unsigned char)Temp; } } Image.SetBitDepth(8); CreateGrayscaleColorTable(Image); return Image; } void writeImage(char *filePath, float *grayscale, unsigned int rows, unsigned int cols) { BMP Output; Output.SetSize(cols, rows); // записали картинку for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { RGBApixel pixel; pixel.Red = grayscale[i * cols + j]; pixel.Green = grayscale[i * cols + j]; pixel.Blue = grayscale[i * cols + j]; pixel.Alpha = 0; Output.SetPixel(j, i, pixel); } } Output.WriteToFile(filePath); } BMP MedianFilterCPU(BMP image){ float mask[COUNT_POINTS] = { 0,0,0,0,0,0,0,0,0 }; BMP output; output.SetSize(image.TellHeight(), image.TellWidth()); for (int row = 0; row < image.TellWidth(); row++) { for (int col = 0; col < image.TellHeight(); col++) { //Границы 0 заполнил if ((row == 0) || (col == 0) || (row == image.TellHeight() - 1) || (col == image.TellWidth() - 1)) { RGBApixel pixel; pixel.Red = 0; pixel.Green 
= 0; pixel.Blue = 0; output.SetPixel(col, row, pixel); } else { for (int x = 0; x < WINDOW_SIZE; x++) { for (int y = 0; y < WINDOW_SIZE; y++) { mask[x*WINDOW_SIZE + y] = image.GetPixel((col + y - 1), (row + x - 1)).Red; } } // Отсортировали значения в маске for (int i = 1; i < COUNT_POINTS; i++) { for (int j = i; j > 0 && mask[j - 1] > mask[j]; j--) { int tmp = mask[j - 1]; mask[j - 1] = mask[j]; mask[j] = tmp; } } RGBApixel pixel; pixel.Red = mask[4]; pixel.Green = mask[4]; pixel.Blue = mask[4]; output.SetPixel(col, row, pixel); } } } return output; } int main() { setlocale(LC_ALL, "RUS"); unsigned int rows, cols; // считали картинку float * imageAsArray = readImage ("lena.bmp", &rows, &cols); std::cout << "Размер:" << rows<<"x"<< cols<< std::endl; BMP imgCPU = readImageForCPU("lena.bmp"); clock_t start_time = clock(); BMP outCPU = MedianFilterCPU(imgCPU); clock_t end_time = clock(); std::cout << "Время на CPU = " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " мсек" << std::endl; outCPU.WriteToFile("resultCPU.bmp"); //Создали дескриптор канала с форматом Float cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray *cuArray; // Выделили Cuda массив соответствии со структурой cudaChannelFormatDesc и вернули дескриптор нового массива CUDA в cuArray CUDA_CHECK_ERROR(cudaMallocArray(&cuArray, &channelDesc, cols, rows)); // Скопировали массив imageAsArray в cuArray CUDA_CHECK_ERROR(cudaMemcpyToArray(cuArray, 0, 0, imageAsArray, rows * cols * sizeof(float), cudaMemcpyHostToDevice)); // Установили параметры текстуры tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModePoint; // Привязали массив к текстуре CUDA_CHECK_ERROR(cudaBindTextureToArray(tex, cuArray, channelDesc)); float *dev_output, *output; float gpuTime = 0; output = (float *)calloc(rows * cols, sizeof(float)); CUDA_CHECK_ERROR(cudaMalloc(&dev_output, rows * cols * sizeof(float))); dim3 dimBlock(16, 16); dim3 dimGrid((cols + dimBlock.x - 1) / dimBlock.x, (rows + dimBlock.y - 1) / dimBlock.y); cudaEvent_t start; cudaEvent_t stop; //Создаем event'ы для синхронизации и замера времени работы GPU CUDA_CHECK_ERROR(cudaEventCreate(&start)); CUDA_CHECK_ERROR(cudaEventCreate(&stop)); //Отмечаем старт расчетов на GPU cudaEventRecord(start, 0); medianFilter << <dimGrid, dimBlock >> > (dev_output, cols, rows); //Копируем результат с девайса на хост в output CUDA_CHECK_ERROR(cudaMemcpy(output, dev_output, rows * cols * sizeof(float), cudaMemcpyDeviceToHost)); //Отмечаем окончание расчета cudaEventRecord(stop, 0); //Синхронизируемя с моментом окончания расчетов cudaEventSynchronize(stop); //Рассчитываем время работы GPU cudaEventElapsedTime(&gpuTime, start, stop); std::cout << "Время на GPU = " << gpuTime << " мсек" << std::endl; writeImage("result.bmp", output, rows, cols); //Чистим ресурсы на видеокарте CUDA_CHECK_ERROR(cudaFreeArray(cuArray)); CUDA_CHECK_ERROR(cudaFree(dev_output)); system("pause"); return 0; }
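/* Editorial note: the sketch below is not part of the original program.  The texture
   *reference* API used above (the global "texture<...> tex" plus cudaBindTextureToArray)
   is deprecated and was removed in CUDA 12, so a rough equivalent using the texture
   *object* API is shown here.  The helper name is an assumption; the kernel would take
   the returned cudaTextureObject_t as an extra argument and sample it with
   tex2D<float>(texObj, col + j, row + i). */
static cudaTextureObject_t makeClampedPointTexture(cudaArray *array)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = array;               // the cudaArray holding the grayscale image

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp; // same clamping behaviour as the setup above
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode     = cudaFilterModePoint;  // no interpolation, raw texel reads
    texDesc.readMode       = cudaReadModeElementType;

    cudaTextureObject_t texObj = 0;
    CUDA_CHECK_ERROR(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL));
    return texObj;                                 // release later with cudaDestroyTextureObject(texObj)
}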
8bc6f563670035a6285199782c581e2d872eeaea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707. // All Rights reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include "../common/grad.h" #define cu_ipow(a,b) ( (int)(__powf( (float)(a), (float)(b) ) ) ) #define ipow(a,b) ( (magma_int_t)(::pow( (float)(a), (float)(b) ) ) ) ////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int P, int Q> static magma_int_t magma_grad_generic_kernel_driver( magma_int_t dim, magma_int_t ncomp, const T* dinterp1d, const T *dgrad1d, magma_trans_t transT, const T *dU, magma_int_t estrdU, const int cstrdU, T *dV, magma_int_t estrdV, const int cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_device_t device; magma_getdevice( &device ); magma_int_t shmem_max, nthreads_max; // ncomp*Q*CeedIntPow(P>Q?P:Q,dim-1); // originally the exponent is (dim-1), but we use dim because // we have to read the original u in shared memory // the original implementation access u directly magma_int_t tmp_size = CeedIntPow(max(P,Q), dim); //ncomp * Q * CeedIntPow(max(P,Q), dim); magma_int_t shmem = 2 * P * Q * sizeof(T); shmem += 2 * tmp_size * sizeof(T); magma_int_t pre = CeedIntPow(P, dim-1); magma_int_t nthreads = max(P, CeedIntPow(Q, dim-1) ); nthreads = magma_roundup( nthreads, Q ); // nthreads must be multiple of Q hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device); #if TORCH_HIP_VERSION >= 9000 hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device); if (shmem <= shmem_max) { hipFuncSetAttribute(magma_grad_generic_kernel<T, P, Q>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem); } #else hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device); #endif // TORCH_HIP_VERSION >= 9000 if ( nthreads > nthreads_max || shmem > shmem_max ) { return 1; } else { dim3 threads(nthreads, 1, 1); dim3 grid(nelem, ncomp, 1); hipLaunchKernelGGL(( magma_grad_generic_kernel<T, P, Q>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue), dim, ncomp, pre, tmp_size, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id ); return (hipPeekAtLastError() == hipSuccess) ? 
0 : 1; } } ////////////////////////////////////////////////////////////////////////////////////////// template<int P> static magma_int_t magma_grad_generic_q( magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch(Q){ case 1: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 1> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 2: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 2> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 3: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 3> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 4: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 4> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 5: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 5> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 6: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 6> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 7: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 7> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 8: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 8> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 9: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 9> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 10: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P,10> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// static magma_int_t magma_grad_generic_q_p( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch(P){ case 1: launch_failed = magma_grad_generic_q< 1> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 2: launch_failed = magma_grad_generic_q< 2> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 3: launch_failed = magma_grad_generic_q< 3> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 4: launch_failed = magma_grad_generic_q< 4> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, 
cstrdV, dim_id, nelem, queue); break; case 5: launch_failed = magma_grad_generic_q< 5> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 6: launch_failed = magma_grad_generic_q< 6> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 7: launch_failed = magma_grad_generic_q< 7> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 8: launch_failed = magma_grad_generic_q< 8> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 9: launch_failed = magma_grad_generic_q< 9> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 10: launch_failed = magma_grad_generic_q<10> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_grad_generic( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t u_dimstride, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t v_dimstride, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans; // Loop through grad dimensions only, batch call over elements and components for (CeedInt dim_ctr = 0; dim_ctr < dim; dim_ctr++) { launch_failed = magma_grad_generic_q_p( P, Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU + dim_ctr * u_dimstride, estrdU, cstrdU, dV + dim_ctr * v_dimstride, estrdV, cstrdV, dim_ctr, nelem, queue ); if (launch_failed != 0) break; } return launch_failed; }
8bc6f563670035a6285199782c581e2d872eeaea.cu
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707. // All Rights reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include "../common/grad.h" #define cu_ipow(a,b) ( (int)(__powf( (float)(a), (float)(b) ) ) ) #define ipow(a,b) ( (magma_int_t)(std::pow( (float)(a), (float)(b) ) ) ) ////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int P, int Q> static magma_int_t magma_grad_generic_kernel_driver( magma_int_t dim, magma_int_t ncomp, const T* dinterp1d, const T *dgrad1d, magma_trans_t transT, const T *dU, magma_int_t estrdU, const int cstrdU, T *dV, magma_int_t estrdV, const int cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_device_t device; magma_getdevice( &device ); magma_int_t shmem_max, nthreads_max; // ncomp*Q*CeedIntPow(P>Q?P:Q,dim-1); // originally the exponent is (dim-1), but we use dim because // we have to read the original u in shared memory // the original implementation access u directly magma_int_t tmp_size = CeedIntPow(max(P,Q), dim); //ncomp * Q * CeedIntPow(max(P,Q), dim); magma_int_t shmem = 2 * P * Q * sizeof(T); shmem += 2 * tmp_size * sizeof(T); magma_int_t pre = CeedIntPow(P, dim-1); magma_int_t nthreads = max(P, CeedIntPow(Q, dim-1) ); nthreads = magma_roundup( nthreads, Q ); // nthreads must be multiple of Q cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device); #if CUDA_VERSION >= 9000 cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); if (shmem <= shmem_max) { cudaFuncSetAttribute(magma_grad_generic_kernel<T, P, Q>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem); } #else cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device); #endif // CUDA_VERSION >= 9000 if ( nthreads > nthreads_max || shmem > shmem_max ) { return 1; } else { dim3 threads(nthreads, 1, 1); dim3 grid(nelem, ncomp, 1); magma_grad_generic_kernel<T, P, Q><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>> ( dim, ncomp, pre, tmp_size, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id ); return (cudaPeekAtLastError() == cudaSuccess) ? 
0 : 1; } } ////////////////////////////////////////////////////////////////////////////////////////// template<int P> static magma_int_t magma_grad_generic_q( magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch(Q){ case 1: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 1> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 2: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 2> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 3: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 3> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 4: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 4> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 5: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 5> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 6: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 6> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 7: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 7> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 8: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 8> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 9: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P, 9> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 10: launch_failed = magma_grad_generic_kernel_driver<CeedScalar, P,10> ( dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// static magma_int_t magma_grad_generic_q_p( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dim_id, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch(P){ case 1: launch_failed = magma_grad_generic_q< 1> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 2: launch_failed = magma_grad_generic_q< 2> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 3: launch_failed = magma_grad_generic_q< 3> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 4: launch_failed = magma_grad_generic_q< 4> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, 
cstrdV, dim_id, nelem, queue); break; case 5: launch_failed = magma_grad_generic_q< 5> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 6: launch_failed = magma_grad_generic_q< 6> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 7: launch_failed = magma_grad_generic_q< 7> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 8: launch_failed = magma_grad_generic_q< 8> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 9: launch_failed = magma_grad_generic_q< 9> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; case 10: launch_failed = magma_grad_generic_q<10> (Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, dim_id, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_grad_generic( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar* dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t u_dimstride, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t v_dimstride, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans; // Loop through grad dimensions only, batch call over elements and components for (CeedInt dim_ctr = 0; dim_ctr < dim; dim_ctr++) { launch_failed = magma_grad_generic_q_p( P, Q, dim, ncomp, dinterp1d, dgrad1d, transT, dU + dim_ctr * u_dimstride, estrdU, cstrdU, dV + dim_ctr * v_dimstride, estrdV, cstrdV, dim_ctr, nelem, queue ); if (launch_failed != 0) break; } return launch_failed; }
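/* Editorial note: the sketch below is not part of the original source.  It isolates, in
   a hedged generic form, the dynamic shared-memory opt-in pattern that
   magma_grad_generic_kernel_driver relies on above: a launch that needs more than the
   default 48 KB of dynamic shared memory per block must first raise
   cudaFuncAttributeMaxDynamicSharedMemorySize (CUDA 9.0+, matching the
   #if CUDA_VERSION >= 9000 guard above).  The helper name and signature are
   illustrative only. */
#if CUDA_VERSION >= 9000
template<typename KernelFunc>
static bool opt_in_dynamic_smem(KernelFunc *kernel, size_t shmem_bytes, int device)
{
    int shmem_max = 0;
    cudaDeviceGetAttribute(&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
    if ((size_t)shmem_max < shmem_bytes) {
        return false;   // the device cannot satisfy the request at all
    }
    // allow this kernel to use up to shmem_bytes of dynamic shared memory per block
    return cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize,
                                (int)shmem_bytes) == cudaSuccess;
}
#endif // CUDA_VERSION >= 9000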
d81de28fccb7c7a0165cd7f010dc6e779d624d07.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************************** Copyright 2021 Charles W. Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **********************************************************************************/ // // Filename: single_gpu_FW.cpp // Author: Charles W Johnson // Description: Single GPU-based Floyd-Warshall algorithm // // Note: The GPU kernel and overall approach to implementing Floyd-Warshall like // this is from an online example by Saaduddin Mahmud that, as of April 13, 2021, // can be found at: https://saadmahmud14.medium.com/parallel-programming-with-cuda-tutorial-part-4-the-floyd-warshall-algorithm-5e1281c46bf6 // // The 'packaging' and the rest of the program is all mine (Charles W. Johnson). 
// #include <chrono> #include <iomanip> #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "FW_file_io.cuh" using namespace std; using namespace std::chrono; #define INF 255 /* ---- GPU Kernels ---- */ // Name: FW_IJ_loops_with_next // // Description: Naive Floyd-Warshall kernel that computes both the dist and the next matrices // __global__ void FW_IJ_loops_with_next(uint8_t* d_dist_pc, int* d_next_pc, uint32_t num_vertices, uint32_t k) { int ij; int ik; int kj; // get the thread's location in the grid // int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; // get the # of threads in each grid dim // int x_threads = gridDim.x * blockDim.x; int y_threads = gridDim.y * blockDim.y; for (int i = x; i < num_vertices; i += x_threads) // I'm going to start at x,y and then jump { // forward by the x-dim of the grid each time // until I can't jump farther for (int j = y; j < num_vertices; j += y_threads) // I'm doing the same thing, only in the y-dim { // calculate the memory locations of [i][j], [i][k], and [k][j] // ij = (i * num_vertices) + j; ik = (i * num_vertices) + k; kj = (k * num_vertices) + j; if (d_dist_pc[ij] > (d_dist_pc[ik] + d_dist_pc[kj])) { d_dist_pc[ij] = (d_dist_pc[ik] + d_dist_pc[kj]); d_next_pc[ij] = d_next_pc[ik]; } } } } // Name: FW_IJ_loops // // Description: Naive Floyd-Warshall kernel that only computes the dist matrix // __global__ void FW_IJ_loops(uint8_t* d_dist_pc, uint32_t num_vertices, uint32_t k) { int ij; int ik; int kj; // get the thread's location in the grid // int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; // get the # of threads in each grid dim // int x_threads = gridDim.x * blockDim.x; int y_threads = gridDim.y * blockDim.y; for (int i = x; i < num_vertices; i += x_threads) // I'm going to start at x,y and then jump { // forward by the x-dim of the grid each time // until I can't jump farther for (int j = y; j < num_vertices; j += y_threads) // I'm doing the same thing, only in the y-dim { // calculate the memory locations of [i][j], [i][k], and [k][j] // ij = (i * num_vertices) + j; ik = (i * num_vertices) + k; kj = (k * num_vertices) + j; if (d_dist_pc[ij] > (d_dist_pc[ik] + d_dist_pc[kj])) { d_dist_pc[ij] = (d_dist_pc[ik] + d_dist_pc[kj]); } } } } /* ---- CPU functions ---- */ // Name: FW_no_next // // Description: Naive Floyd-Warshall function that only computes the dist matrix // void FW_no_next(uint8_t** dist, uint32_t num_vertices, double& duration) { auto start = chrono::high_resolution_clock::now(); for (int k=0; k < num_vertices; k++) { for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (dist[i][j] > (dist[i][k] + dist[k][j])) { dist[i][j] = (dist[i][k] + dist[k][j]); } } } } auto stop = chrono::high_resolution_clock::now(); auto temp_duration = duration_cast<microseconds>(stop - start); duration = temp_duration.count(); } // Name: FW_with_next // // Description: Naive Floyd-Warshall function that computes both the dist and the next matrices // void FW_with_next(uint8_t** dist, int** next, uint32_t num_vertices, double& duration) { auto start = chrono::high_resolution_clock::now(); for (int k=0; k < num_vertices; k++) { for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (dist[i][j] > (dist[i][k] + dist[k][j])) { dist[i][j] = (dist[i][k] + dist[k][j]); next[i][j] = next[i][k]; } } } } auto stop = 
chrono::high_resolution_clock::now(); auto temp_duration = duration_cast<microseconds>(stop - start); duration = temp_duration.count(); } /* ---- The Main Show ---- */ /* The main() function takes the following arguments: 1 - filename 2 - next ('next' or 'no_next') - this is to determine whether or not a next matrix should be created 3 - save dist ('save_dist' or 'no_save_dist') - this is to decide whether to save a copy of the dist matrix 4 - save next ('save_next' or 'no_save_next') - this is to decide whether to save a copy of the next matrix 5 - console ('console' or 'no_console') - display optional STDOUT 6 - check ('check' or 'no_check') - check the GPU output against CPU output - takes time!!! */ int main(int argc, char* argv[]) { // Check to see if we got all arguments // if (argc < 7) { cout << "This function takes the following arguments:" << endl; cout << endl; cout << "1 - graph filename" << endl; cout << "2 - whether or not to create a next matrix - 'next' or 'no_next'" << endl; cout << "3 - whether or not to save the dist matrix - 'save_dist' or 'no_save_dist'" << endl; cout << "4 - whether or not to save the next matrix - 'save_next' or 'no_save_next'" << endl; cout << "5 - whether or not to show the console - 'console' or 'no_console'" << endl; cout << "6 - whether or not to run a CPU version and check the GPU output against the CPU output - 'check' or 'no_check'" << endl; cout << endl; cout << "Warning: The CPU F-W version will take a long time to compute, so only use the check option if you are SURE that you want to do the check!!!" << endl; cout << endl; return 0; } // Check to see if the correct values were used and make assignments PRN // string temp_check = ""; string next_option = ""; string save_dist_results = ""; string save_next_results = ""; bool show_console = false; bool check_output = false; // next matrix // temp_check = argv[2]; if ((temp_check == "next") || (temp_check == "no_next")) { next_option = temp_check; } else { cout << "The next option must be 'next' or 'no_next'." << endl << endl; return 0; } // Save dist matrix to disk or not // temp_check = argv[3]; if ((temp_check == "save_dist") || (temp_check == "no_save_dist")) { save_dist_results = temp_check; } else { cout << "The save dist results option must be 'save_dist' or 'no_save_dist'." << endl << endl; return 0; } // Save next matrix to disk or not // temp_check = argv[4]; if ((temp_check == "save_next") || (temp_check == "no_save_next")) { save_next_results = temp_check; } else { cout << "The save next results option must be 'save_next' or 'no_save_next'." << endl << endl; return 0; } // Display certain output to STDOUT // temp_check = argv[5]; if ((temp_check == "console") || (temp_check == "no_console")) { if (temp_check == "console") { show_console = true; } } else { cout << "The show console option must be 'console' or 'no_console'." << endl << endl; return 0; } // Perform a CPU check? // temp_check = argv[6]; if ((temp_check == "check") || (temp_check == "no_check")) { if (temp_check == "check") { check_output = true; } } else { cout << "The check results option must be 'check' or 'no_check'." 
<< endl << endl; return 0; } /* ---- If we get this far, it's now time to read in the num_vertices and num_edges ---- */ uint32_t num_vertices = 0; // Number of vertices uint32_t num_edges = 0; // Number of edges // Read in the number of vertices from the file // readInNumVerticesAndEdges(argv[1], num_vertices, num_edges); if (show_console) { cout << endl; cout << "Num of vertices is: " << num_vertices << endl; cout << "Num of edges is: " << num_edges << endl; cout << endl; } /* ---- Begin Declaring and Allocating Graph Variables and Memory ---- */ // -- First, the CPU matrices -- // // The host dist matrices // h_dist_pc is going to be a huge chunk of memory for the dist matrix. // h_dist is going to be how we reference it uint64_t mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); uint8_t **h_dist; uint8_t *h_dist_pc; uint8_t **cpu_dist; uint8_t *cpu_dist_pc; h_dist = (uint8_t **) malloc(num_vertices * sizeof(uint8_t *)); h_dist_pc = (uint8_t *) malloc(mem_size); if (check_output) { cpu_dist = (uint8_t **) malloc(num_vertices * sizeof(uint8_t *)); cpu_dist_pc = (uint8_t *) malloc(mem_size); } if (show_console) cout << "h_dist_pc = " << mem_size << " bytes (" << ((mem_size / 1024.0) / 1024.0) << " MB)" << endl; if (show_console) cout << endl; // Puts a pointer in dist[i] to a place in the chunk // of memory that will represent that row. // for (int i=0; i < num_vertices; i++) { h_dist[i] = h_dist_pc + (i * num_vertices); if (check_output) { cpu_dist[i] = cpu_dist_pc + (i * num_vertices); } } // The next matrix // h_next_pc is going to be a huge chunk of memory for the h_next matrix. // h_next is going to be how we reference it int **h_next; int *h_next_pc; int **cpu_next; int *cpu_next_pc; if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); h_next = (int **) malloc(num_vertices * sizeof(int *)); h_next_pc = (int *) malloc(mem_size); if (check_output) { cpu_next = (int **) malloc(num_vertices * sizeof(int *)); cpu_next_pc = (int *) malloc(mem_size); } if (show_console) cout << "h_next_pc = " << mem_size << " bytes (" << ((mem_size / 1024.0) / 1024.0) << " MB)" << endl; if (show_console) cout << endl; // Puts a pointer in h_next[i] to a place in the chunk // of memory that will represent that row. 
for (int i=0; i < num_vertices; i++) { h_next[i] = h_next_pc + (i * num_vertices); if (check_output) { cpu_next[i] = cpu_next_pc + (i * num_vertices); } } } // -- Second, the GPU matrices -- // // The dist GPU matrix // mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); uint8_t *d_dist_pc; hipMalloc((void **) &d_dist_pc, (mem_size)); // The next GPU matrix // int *d_next_pc; if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); hipMalloc((void **) &d_next_pc, (mem_size)); } // Other GPU variables // uint32_t *d_num_vertices; hipMalloc((void **) &d_num_vertices, sizeof(uint32_t)); /* ---- End of Graph Memory Allocation ---- */ /* ---- Prep the matrices ---- */ // Initialize all points in the h_dist matrix to INF // Initialize all points in the h_next matrix to -1 (PRN) // for (int i=0; i<num_vertices; i++) { for (int j=0; j<num_vertices; j++) { h_dist[i][j] = INF; if (check_output) cpu_dist[i][j] = INF; if (next_option == "next") { h_next[i][j] = -1; if (check_output) cpu_next[i][j] = -1; } } } /* ---- Read in graph and store the data in the dist matrix ---- */ bool use_next = false; if (next_option == "next") { use_next = true; } readInGraph(argv[1], h_dist, h_next, use_next); if (check_output) { readInGraph(argv[1], cpu_dist, cpu_next, use_next); } /* ---- Set the distances for each vertex for itself to be 0 in dist ---- */ for (int i=0; i<num_vertices; i++) { h_dist[i][i] = 0; if (check_output) cpu_dist[i][i] = 0; if (next_option == "next") { h_next[i][i] = i; if (check_output) cpu_next[i][i] = i; } } /* ---- Copy the data to the GPU ---- */ auto start_gpu_inc_copy = chrono::high_resolution_clock::now(); mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); hipMemcpy(d_dist_pc, h_dist_pc, mem_size, hipMemcpyHostToDevice); if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); hipMemcpy(d_next_pc, h_next_pc, mem_size, hipMemcpyHostToDevice); } hipMemcpy(d_num_vertices, &num_vertices, sizeof(uint32_t), hipMemcpyHostToDevice); /* ---- Run the F-W algorithm ---- */ // As we'll be using grids of 32 threads in the y-axis, then we'll want // the num_blocks in the y-axis to be a multiple of 32. And since it's OK // if there's a remainder, we can use integer division. However, since there // still is the 65,535 limit, we'll look at the number of vertices. int blocks; if (num_vertices < 65535) { if (num_vertices < 32) { blocks = 1; } else { blocks = (num_vertices + 31) / 32; } } else { blocks = 2047; // the most 32-thread blocks that can fit into 65,535 } dim3 grid(blocks,blocks,1); // we want to have a square matrix (to keep things simple!) 
dim3 block(32,32,1); // maximizes occupancy - 1,024 threads/block, so 2 blocks/SM (except for CC 7.5 and 8.0) if (show_console) cout << "About to run GPU F-W" << endl << endl; auto start_gpu = chrono::high_resolution_clock::now(); if (next_option == "next") { for (int k = 0; k < num_vertices; k++) { hipLaunchKernelGGL(( FW_IJ_loops_with_next), dim3(grid), dim3(block), 0, 0, d_dist_pc, d_next_pc, num_vertices, k); } hipDeviceSynchronize(); } else { for (int k = 0; k < num_vertices; k++) { hipLaunchKernelGGL(( FW_IJ_loops), dim3(grid), dim3(block), 0, 0, d_dist_pc, num_vertices, k); } hipDeviceSynchronize(); } auto stop_gpu = chrono::high_resolution_clock::now(); if (show_console) cout << "GPU F-W run complete" << endl << endl; /* ---- Copy data back to host ---- */ mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); hipMemcpy(h_dist_pc, d_dist_pc, mem_size, hipMemcpyDeviceToHost); if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); hipMemcpy(h_next_pc, d_next_pc, mem_size, hipMemcpyDeviceToHost); } auto stop_gpu_inc_copy = chrono::high_resolution_clock::now(); /* ---- If specified, run the CPU-based F-W algorithm on cpu_dist (and cpu_next PRN) matrix ---- */ double duration_cpu = 0; if (check_output) { if (show_console) cout << "About to start CPU F-W run" << endl << endl; if (next_option == "next") { FW_with_next(cpu_dist, cpu_next, num_vertices, duration_cpu); } else { FW_no_next(cpu_dist, num_vertices, duration_cpu); } if (show_console) cout << "CPU F-W run complete" << endl << endl; } /* ---- Check the matrices against each other ---- */ bool check_cleared = true; if (check_output) { if (show_console) cout << "About to start CPU/GPU comparison check" << endl << endl; for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (h_dist[i][j] != cpu_dist[i][j]) { check_cleared = false; break; } if (next_option == "next") { if (h_next[i][j] != cpu_next[i][j]) { check_cleared = false; break; } } } if (check_cleared == false) { break; } } if (show_console) cout << "CPU/GPU comparison check complete" << endl << endl; } if (check_output) { if (check_cleared == false) { cout << endl; cout << "Check failed - CPU and GPU matrices DO NOT MATCH!" 
<< endl; cout << endl; } else { cout << endl; cout << "Check PASSED - CPU and GPU matrices match" << endl; cout << endl; } } /* ---- Save the dist and next matrices to disk as called for ---- */ if (save_dist_results == "save_dist") { // save the dist matrix to disk saveDistMatrixToDisk(argv[1], h_dist, num_vertices, num_edges); } if ((save_next_results == "save_next") && (next_option == "next")) { // save the next matrix to disk saveNextMatrixToDisk(argv[1], h_next, num_vertices, num_edges); } /* ---- Print out the runtime for the algorithm itself ---- */ // Compute the time taken to run algorithm auto temp_duration_gpu = duration_cast<microseconds>(stop_gpu - start_gpu); auto temp_duration_gpu_inc_copy = duration_cast<microseconds>(stop_gpu_inc_copy - start_gpu_inc_copy); double duration_gpu = temp_duration_gpu.count(); double duration_gpu_inc_copy = temp_duration_gpu_inc_copy.count(); if (show_console) { cout << "Runtime for the GPU F-W algorithm itself is: " << (duration_gpu / 1000.0) << " milliseconds" << endl; cout << "Runtime for the GPU F-W algorithm itself is: " << ((duration_gpu / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; cout << "Runtime for the GPU F-W algorithm with mem copy is: " << (duration_gpu_inc_copy / 1000.0) << " milliseconds" << endl; cout << "Runtime for the GPU F-W algorithm with mem copy is: " << ((duration_gpu_inc_copy / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; if (check_output) { cout << "Runtime for the CPU F-W algorithm is: " << (duration_cpu / 1000.0) << " milliseconds" << endl; cout << "Runtime for the CPU F-W algorithm is: " << ((duration_cpu / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; } } // Free up malloc'ed memory // free(h_dist); free(h_dist_pc); if (check_output) { free(cpu_dist); free(cpu_dist_pc); } if (next_option == "next") { free(h_next); free(h_next_pc); if (check_output) { free(cpu_next); free(cpu_next_pc); } hipFree(d_next_pc); } hipFree(d_dist_pc); hipFree(d_num_vertices); // output the result in microseconds // cout << setprecision(15) << duration_gpu; if (show_console) cout << endl; return 0; }
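/*
 * Illustrative sketch (not part of the original single_gpu_FW sources): both
 * FW_IJ_loops kernels above cover the num_vertices x num_vertices matrix with
 * a 2D grid-stride loop, which is why main() can clamp the grid to 2047 x 2047
 * blocks of 32 x 32 threads and still handle any vertex count. The toy kernel
 * below (hypothetical name, an assumption for illustration only) uses the same
 * traversal and counts how often each cell is visited; with this pattern every
 * cell is visited exactly once no matter how small the launched grid is.
 */
__global__ void grid_stride_coverage(int *visits, int n)
{
    int x = (blockDim.x * blockIdx.x) + threadIdx.x;
    int y = (blockDim.y * blockIdx.y) + threadIdx.y;
    int x_threads = gridDim.x * blockDim.x;
    int y_threads = gridDim.y * blockDim.y;

    // Start at (x, y) and hop forward by the grid size in each dimension,
    // exactly as FW_IJ_loops and FW_IJ_loops_with_next do.
    for (int i = x; i < n; i += x_threads) {
        for (int j = y; j < n; j += y_threads) {
            atomicAdd(&visits[i * n + j], 1);   // ends up 1 for every cell
        }
    }
}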
d81de28fccb7c7a0165cd7f010dc6e779d624d07.cu
/********************************************************************************** Copyright 2021 Charles W. Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **********************************************************************************/ // // Filename: single_gpu_FW.cpp // Author: Charles W Johnson // Description: Single GPU-based Floyd-Warshall algorithm // // Note: The GPU kernel and overall approach to implementing Floyd-Warshall like // this is from an online example by Saaduddin Mahmud that, as of April 13, 2021, // can be found at: https://saadmahmud14.medium.com/parallel-programming-with-cuda-tutorial-part-4-the-floyd-warshall-algorithm-5e1281c46bf6 // // The 'packaging' and the rest of the program is all mine (Charles W. Johnson). // #include <chrono> #include <iomanip> #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include "FW_file_io.cuh" using namespace std; using namespace std::chrono; #define INF 255 /* ---- GPU Kernels ---- */ // Name: FW_IJ_loops_with_next // // Description: Naive Floyd-Warshall kernel that computes both the dist and the next matrices // __global__ void FW_IJ_loops_with_next(uint8_t* d_dist_pc, int* d_next_pc, uint32_t num_vertices, uint32_t k) { int ij; int ik; int kj; // get the thread's location in the grid // int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; // get the # of threads in each grid dim // int x_threads = gridDim.x * blockDim.x; int y_threads = gridDim.y * blockDim.y; for (int i = x; i < num_vertices; i += x_threads) // I'm going to start at x,y and then jump { // forward by the x-dim of the grid each time // until I can't jump farther for (int j = y; j < num_vertices; j += y_threads) // I'm doing the same thing, only in the y-dim { // calculate the memory locations of [i][j], [i][k], and [k][j] // ij = (i * num_vertices) + j; ik = (i * num_vertices) + k; kj = (k * num_vertices) + j; if (d_dist_pc[ij] > (d_dist_pc[ik] + d_dist_pc[kj])) { d_dist_pc[ij] = (d_dist_pc[ik] + d_dist_pc[kj]); d_next_pc[ij] = d_next_pc[ik]; } } } } // Name: FW_IJ_loops // // Description: Naive Floyd-Warshall kernel that only computes the dist matrix // __global__ void FW_IJ_loops(uint8_t* d_dist_pc, uint32_t num_vertices, uint32_t k) { int ij; int ik; int kj; // get the thread's location in the grid // int x = (blockDim.x * blockIdx.x) + threadIdx.x; int y = (blockDim.y * blockIdx.y) + threadIdx.y; // get the # of threads in each grid dim // int x_threads = gridDim.x * blockDim.x; 
int y_threads = gridDim.y * blockDim.y; for (int i = x; i < num_vertices; i += x_threads) // I'm going to start at x,y and then jump { // forward by the x-dim of the grid each time // until I can't jump farther for (int j = y; j < num_vertices; j += y_threads) // I'm doing the same thing, only in the y-dim { // calculate the memory locations of [i][j], [i][k], and [k][j] // ij = (i * num_vertices) + j; ik = (i * num_vertices) + k; kj = (k * num_vertices) + j; if (d_dist_pc[ij] > (d_dist_pc[ik] + d_dist_pc[kj])) { d_dist_pc[ij] = (d_dist_pc[ik] + d_dist_pc[kj]); } } } } /* ---- CPU functions ---- */ // Name: FW_no_next // // Description: Naive Floyd-Warshall function that only computes the dist matrix // void FW_no_next(uint8_t** dist, uint32_t num_vertices, double& duration) { auto start = chrono::high_resolution_clock::now(); for (int k=0; k < num_vertices; k++) { for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (dist[i][j] > (dist[i][k] + dist[k][j])) { dist[i][j] = (dist[i][k] + dist[k][j]); } } } } auto stop = chrono::high_resolution_clock::now(); auto temp_duration = duration_cast<microseconds>(stop - start); duration = temp_duration.count(); } // Name: FW_with_next // // Description: Naive Floyd-Warshall function that computes both the dist and the next matrices // void FW_with_next(uint8_t** dist, int** next, uint32_t num_vertices, double& duration) { auto start = chrono::high_resolution_clock::now(); for (int k=0; k < num_vertices; k++) { for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (dist[i][j] > (dist[i][k] + dist[k][j])) { dist[i][j] = (dist[i][k] + dist[k][j]); next[i][j] = next[i][k]; } } } } auto stop = chrono::high_resolution_clock::now(); auto temp_duration = duration_cast<microseconds>(stop - start); duration = temp_duration.count(); } /* ---- The Main Show ---- */ /* The main() function takes the following arguments: 1 - filename 2 - next ('next' or 'no_next') - this is to determine whether or not a next matrix should be created 3 - save dist ('save_dist' or 'no_save_dist') - this is to decide whether to save a copy of the dist matrix 4 - save next ('save_next' or 'no_save_next') - this is to decide whether to save a copy of the next matrix 5 - console ('console' or 'no_console') - display optional STDOUT 6 - check ('check' or 'no_check') - check the GPU output against CPU output - takes time!!! */ int main(int argc, char* argv[]) { // Check to see if we got all arguments // if (argc < 7) { cout << "This function takes the following arguments:" << endl; cout << endl; cout << "1 - graph filename" << endl; cout << "2 - whether or not to create a next matrix - 'next' or 'no_next'" << endl; cout << "3 - whether or not to save the dist matrix - 'save_dist' or 'no_save_dist'" << endl; cout << "4 - whether or not to save the next matrix - 'save_next' or 'no_save_next'" << endl; cout << "5 - whether or not to show the console - 'console' or 'no_console'" << endl; cout << "6 - whether or not to run a CPU version and check the GPU output against the CPU output - 'check' or 'no_check'" << endl; cout << endl; cout << "Warning: The CPU F-W version will take a long time to compute, so only use the check option if you are SURE that you want to do the check!!!" 
<< endl; cout << endl; return 0; } // Check to see if the correct values were used and make assignments PRN // string temp_check = ""; string next_option = ""; string save_dist_results = ""; string save_next_results = ""; bool show_console = false; bool check_output = false; // next matrix // temp_check = argv[2]; if ((temp_check == "next") || (temp_check == "no_next")) { next_option = temp_check; } else { cout << "The next option must be 'next' or 'no_next'." << endl << endl; return 0; } // Save dist matrix to disk or not // temp_check = argv[3]; if ((temp_check == "save_dist") || (temp_check == "no_save_dist")) { save_dist_results = temp_check; } else { cout << "The save dist results option must be 'save_dist' or 'no_save_dist'." << endl << endl; return 0; } // Save next matrix to disk or not // temp_check = argv[4]; if ((temp_check == "save_next") || (temp_check == "no_save_next")) { save_next_results = temp_check; } else { cout << "The save next results option must be 'save_next' or 'no_save_next'." << endl << endl; return 0; } // Display certain output to STDOUT // temp_check = argv[5]; if ((temp_check == "console") || (temp_check == "no_console")) { if (temp_check == "console") { show_console = true; } } else { cout << "The show console option must be 'console' or 'no_console'." << endl << endl; return 0; } // Perform a CPU check? // temp_check = argv[6]; if ((temp_check == "check") || (temp_check == "no_check")) { if (temp_check == "check") { check_output = true; } } else { cout << "The check results option must be 'check' or 'no_check'." << endl << endl; return 0; } /* ---- If we get this far, it's now time to read in the num_vertices and num_edges ---- */ uint32_t num_vertices = 0; // Number of vertices uint32_t num_edges = 0; // Number of edges // Read in the number of vertices from the file // readInNumVerticesAndEdges(argv[1], num_vertices, num_edges); if (show_console) { cout << endl; cout << "Num of vertices is: " << num_vertices << endl; cout << "Num of edges is: " << num_edges << endl; cout << endl; } /* ---- Begin Declaring and Allocating Graph Variables and Memory ---- */ // -- First, the CPU matrices -- // // The host dist matrices // h_dist_pc is going to be a huge chunk of memory for the dist matrix. // h_dist is going to be how we reference it uint64_t mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); uint8_t **h_dist; uint8_t *h_dist_pc; uint8_t **cpu_dist; uint8_t *cpu_dist_pc; h_dist = (uint8_t **) malloc(num_vertices * sizeof(uint8_t *)); h_dist_pc = (uint8_t *) malloc(mem_size); if (check_output) { cpu_dist = (uint8_t **) malloc(num_vertices * sizeof(uint8_t *)); cpu_dist_pc = (uint8_t *) malloc(mem_size); } if (show_console) cout << "h_dist_pc = " << mem_size << " bytes (" << ((mem_size / 1024.0) / 1024.0) << " MB)" << endl; if (show_console) cout << endl; // Puts a pointer in dist[i] to a place in the chunk // of memory that will represent that row. // for (int i=0; i < num_vertices; i++) { h_dist[i] = h_dist_pc + (i * num_vertices); if (check_output) { cpu_dist[i] = cpu_dist_pc + (i * num_vertices); } } // The next matrix // h_next_pc is going to be a huge chunk of memory for the h_next matrix. 
// h_next is going to be how we reference it int **h_next; int *h_next_pc; int **cpu_next; int *cpu_next_pc; if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); h_next = (int **) malloc(num_vertices * sizeof(int *)); h_next_pc = (int *) malloc(mem_size); if (check_output) { cpu_next = (int **) malloc(num_vertices * sizeof(int *)); cpu_next_pc = (int *) malloc(mem_size); } if (show_console) cout << "h_next_pc = " << mem_size << " bytes (" << ((mem_size / 1024.0) / 1024.0) << " MB)" << endl; if (show_console) cout << endl; // Puts a pointer in h_next[i] to a place in the chunk // of memory that will represent that row. for (int i=0; i < num_vertices; i++) { h_next[i] = h_next_pc + (i * num_vertices); if (check_output) { cpu_next[i] = cpu_next_pc + (i * num_vertices); } } } // -- Second, the GPU matrices -- // // The dist GPU matrix // mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); uint8_t *d_dist_pc; cudaMalloc((void **) &d_dist_pc, (mem_size)); // The next GPU matrix // int *d_next_pc; if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); cudaMalloc((void **) &d_next_pc, (mem_size)); } // Other GPU variables // uint32_t *d_num_vertices; cudaMalloc((void **) &d_num_vertices, sizeof(uint32_t)); /* ---- End of Graph Memory Allocation ---- */ /* ---- Prep the matrices ---- */ // Initialize all points in the h_dist matrix to INF // Initialize all points in the h_next matrix to -1 (PRN) // for (int i=0; i<num_vertices; i++) { for (int j=0; j<num_vertices; j++) { h_dist[i][j] = INF; if (check_output) cpu_dist[i][j] = INF; if (next_option == "next") { h_next[i][j] = -1; if (check_output) cpu_next[i][j] = -1; } } } /* ---- Read in graph and store the data in the dist matrix ---- */ bool use_next = false; if (next_option == "next") { use_next = true; } readInGraph(argv[1], h_dist, h_next, use_next); if (check_output) { readInGraph(argv[1], cpu_dist, cpu_next, use_next); } /* ---- Set the distances for each vertex for itself to be 0 in dist ---- */ for (int i=0; i<num_vertices; i++) { h_dist[i][i] = 0; if (check_output) cpu_dist[i][i] = 0; if (next_option == "next") { h_next[i][i] = i; if (check_output) cpu_next[i][i] = i; } } /* ---- Copy the data to the GPU ---- */ auto start_gpu_inc_copy = chrono::high_resolution_clock::now(); mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); cudaMemcpy(d_dist_pc, h_dist_pc, mem_size, cudaMemcpyHostToDevice); if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); cudaMemcpy(d_next_pc, h_next_pc, mem_size, cudaMemcpyHostToDevice); } cudaMemcpy(d_num_vertices, &num_vertices, sizeof(uint32_t), cudaMemcpyHostToDevice); /* ---- Run the F-W algorithm ---- */ // As we'll be using grids of 32 threads in the y-axis, then we'll want // the num_blocks in the y-axis to be a multiple of 32. And since it's OK // if there's a remainder, we can use integer division. However, since there // still is the 65,535 limit, we'll look at the number of vertices. int blocks; if (num_vertices < 65535) { if (num_vertices < 32) { blocks = 1; } else { blocks = (num_vertices + 31) / 32; } } else { blocks = 2047; // the most 32-thread blocks that can fit into 65,535 } dim3 grid(blocks,blocks,1); // we want to have a square matrix (to keep things simple!) 
dim3 block(32,32,1); // maximizes occupancy - 1,024 threads/block, so 2 blocks/SM (except for CC 7.5 and 8.0) if (show_console) cout << "About to run GPU F-W" << endl << endl; auto start_gpu = chrono::high_resolution_clock::now(); if (next_option == "next") { for (int k = 0; k < num_vertices; k++) { FW_IJ_loops_with_next<<<grid, block>>>(d_dist_pc, d_next_pc, num_vertices, k); } cudaDeviceSynchronize(); } else { for (int k = 0; k < num_vertices; k++) { FW_IJ_loops<<<grid, block>>>(d_dist_pc, num_vertices, k); } cudaDeviceSynchronize(); } auto stop_gpu = chrono::high_resolution_clock::now(); if (show_console) cout << "GPU F-W run complete" << endl << endl; /* ---- Copy data back to host ---- */ mem_size = (sizeof(uint8_t) * num_vertices * num_vertices); cudaMemcpy(h_dist_pc, d_dist_pc, mem_size, cudaMemcpyDeviceToHost); if (next_option == "next") { mem_size = (sizeof(int) * num_vertices * num_vertices); cudaMemcpy(h_next_pc, d_next_pc, mem_size, cudaMemcpyDeviceToHost); } auto stop_gpu_inc_copy = chrono::high_resolution_clock::now(); /* ---- If specified, run the CPU-based F-W algorithm on cpu_dist (and cpu_next PRN) matrix ---- */ double duration_cpu = 0; if (check_output) { if (show_console) cout << "About to start CPU F-W run" << endl << endl; if (next_option == "next") { FW_with_next(cpu_dist, cpu_next, num_vertices, duration_cpu); } else { FW_no_next(cpu_dist, num_vertices, duration_cpu); } if (show_console) cout << "CPU F-W run complete" << endl << endl; } /* ---- Check the matrices against each other ---- */ bool check_cleared = true; if (check_output) { if (show_console) cout << "About to start CPU/GPU comparison check" << endl << endl; for (int i=0; i < num_vertices; i++) { for (int j=0; j < num_vertices; j++) { if (h_dist[i][j] != cpu_dist[i][j]) { check_cleared = false; break; } if (next_option == "next") { if (h_next[i][j] != cpu_next[i][j]) { check_cleared = false; break; } } } if (check_cleared == false) { break; } } if (show_console) cout << "CPU/GPU comparison check complete" << endl << endl; } if (check_output) { if (check_cleared == false) { cout << endl; cout << "Check failed - CPU and GPU matrices DO NOT MATCH!" 
<< endl; cout << endl; } else { cout << endl; cout << "Check PASSED - CPU and GPU matrices match" << endl; cout << endl; } } /* ---- Save the dist and next matrices to disk as called for ---- */ if (save_dist_results == "save_dist") { // save the dist matrix to disk saveDistMatrixToDisk(argv[1], h_dist, num_vertices, num_edges); } if ((save_next_results == "save_next") && (next_option == "next")) { // save the next matrix to disk saveNextMatrixToDisk(argv[1], h_next, num_vertices, num_edges); } /* ---- Print out the runtime for the algorithm itself ---- */ // Compute the time taken to run algorithm auto temp_duration_gpu = duration_cast<microseconds>(stop_gpu - start_gpu); auto temp_duration_gpu_inc_copy = duration_cast<microseconds>(stop_gpu_inc_copy - start_gpu_inc_copy); double duration_gpu = temp_duration_gpu.count(); double duration_gpu_inc_copy = temp_duration_gpu_inc_copy.count(); if (show_console) { cout << "Runtime for the GPU F-W algorithm itself is: " << (duration_gpu / 1000.0) << " milliseconds" << endl; cout << "Runtime for the GPU F-W algorithm itself is: " << ((duration_gpu / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; cout << "Runtime for the GPU F-W algorithm with mem copy is: " << (duration_gpu_inc_copy / 1000.0) << " milliseconds" << endl; cout << "Runtime for the GPU F-W algorithm with mem copy is: " << ((duration_gpu_inc_copy / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; if (check_output) { cout << "Runtime for the CPU F-W algorithm is: " << (duration_cpu / 1000.0) << " milliseconds" << endl; cout << "Runtime for the CPU F-W algorithm is: " << ((duration_cpu / 1000.0) / 1000.0) << " seconds" << endl; cout << endl; } } // Free up malloc'ed memory // free(h_dist); free(h_dist_pc); if (check_output) { free(cpu_dist); free(cpu_dist_pc); } if (next_option == "next") { free(h_next); free(h_next_pc); if (check_output) { free(cpu_next); free(cpu_next_pc); } cudaFree(d_next_pc); } cudaFree(d_dist_pc); cudaFree(d_num_vertices); // output the result in microseconds // cout << setprecision(15) << duration_gpu; if (show_console) cout << endl; return 0; }
de3c5b55412d4cfc89d6820b571eb1620bdf7ca6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_adam_with_amsgrad_impl.cuh"
#include "include/hip/hip_fp16.h"

template <typename T>
__device__ __forceinline__ T sqrtFunc(T x) {
  return sqrt(x);
}

template <>
__device__ __forceinline__ half sqrtFunc(half x) {
  return hsqrt(x);
}

template <typename T>
__device__ __forceinline__ T maxFunc(T x, T y) {
  return x > y ? x : y;
}

template <>
__device__ __forceinline__ half maxFunc(half x, half y) {
  return x > y ? x : y;
}

template <typename T>
__global__ void CalApplyAdamWithAmsgradKernel(const size_t size, const int64_t batch_size, T *var, T *m, T *v,
                                              T *vhat, T *beta1_power, T *beta2_power, const T *lr, const T *grad,
                                              const T beta1, const T beta2, const T epsilon, T *output_var,
                                              T *output_m, T *output_v, T *output_vhat) {
  auto all_elements = size * batch_size;
  const T one = static_cast<T>(1.0);
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < all_elements; pos += gridDim.x * blockDim.x) {
    auto batch = pos / size;
    auto new_learning_rate = lr[batch] * sqrtFunc(one - beta2_power[batch]) / (one - beta1_power[batch]);
    m[pos] += (grad[pos] - m[pos]) * (one - static_cast<T>(beta1));
    output_m[pos] = m[pos];
    v[pos] += (grad[pos] * grad[pos] - v[pos]) * (one - static_cast<T>(beta2));
    output_v[pos] = v[pos];
    vhat[pos] = maxFunc(vhat[pos], v[pos]);
    output_vhat[pos] = vhat[pos];
    var[pos] -= new_learning_rate * m[pos] / (sqrtFunc(vhat[pos]) + static_cast<T>(epsilon));
    output_var[pos] = var[pos];
  }
}

template <typename T>
hipError_t CalApplyAdamWithAmsgrad(const size_t size, const int64_t batch_size, T *var, T *m, T *v, T *vhat,
                                   T *beta1_power, T *beta2_power, const T *lr, const T *grad, const T beta1,
                                   const T beta2, const T epsilon, T *output_var, T *output_m, T *output_v,
                                   T *output_vhat, const uint32_t &device_id, hipStream_t stream_ptr) {
  hipLaunchKernelGGL(( CalApplyAdamWithAmsgradKernel), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)),
                     0, stream_ptr, size, batch_size, var, m, v, vhat, beta1_power, beta2_power, lr, grad, beta1,
                     beta2, epsilon, output_var, output_m, output_v, output_vhat);
  CHECK_CUDA_LAUNCH_SUCCESS();
}

template CUDA_LIB_EXPORT hipError_t CalApplyAdamWithAmsgrad<double>(
  const size_t size, const int64_t batch_size, double *var, double *m, double *v, double *vhat, double *beta1_power,
  double *beta2_power, const double *lr, const double *grad, const double beta1, const double beta2,
  const double epsilon, double *output_var, double *output_m, double *output_v, double *output_vhat,
  const uint32_t &device_id, hipStream_t stream_ptr);
template CUDA_LIB_EXPORT hipError_t CalApplyAdamWithAmsgrad<float>(
  const size_t size, const int64_t batch_size, float *var, float *m, float *v, float *vhat, float *beta1_power,
  float *beta2_power, const float *lr, const float *grad, const float beta1, const float beta2, const float epsilon,
  float *output_var, float *output_m, float *output_v, float *output_vhat, const uint32_t &device_id,
  hipStream_t stream_ptr);
template CUDA_LIB_EXPORT hipError_t CalApplyAdamWithAmsgrad<half>(
  const size_t size, const int64_t batch_size, half *var, half *m, half *v, half *vhat, half *beta1_power,
  half *beta2_power, const half *lr, const half *grad, const half beta1, const half beta2, const half epsilon,
  half *output_var, half *output_m, half *output_v, half *output_vhat, const uint32_t &device_id,
  hipStream_t stream_ptr);
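/*
 * Illustrative scalar reference (not part of the original file), assuming the
 * same per-element update rule as CalApplyAdamWithAmsgradKernel above. It is
 * only meant to make the Adam-with-AMSGrad math easy to check on the host;
 * the struct and function names here are hypothetical.
 */
#include <algorithm>
#include <cmath>

struct AmsgradState {
  double m = 0.0;     // first moment
  double v = 0.0;     // second moment
  double vhat = 0.0;  // running max of the second moment (AMSGrad)
};

inline double amsgrad_step(AmsgradState &s, double var, double grad, double lr, double beta1, double beta2,
                           double beta1_power, double beta2_power, double epsilon) {
  // Bias-corrected step size, computed per batch in the kernel.
  const double new_lr = lr * std::sqrt(1.0 - beta2_power) / (1.0 - beta1_power);
  s.m += (grad - s.m) * (1.0 - beta1);
  s.v += (grad * grad - s.v) * (1.0 - beta2);
  s.vhat = std::max(s.vhat, s.v);
  return var - new_lr * s.m / (std::sqrt(s.vhat) + epsilon);
}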
de3c5b55412d4cfc89d6820b571eb1620bdf7ca6.cu
/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_adam_with_amsgrad_impl.cuh"
#include "include/cuda_fp16.h"

template <typename T>
__device__ __forceinline__ T sqrtFunc(T x) {
  return sqrt(x);
}

template <>
__device__ __forceinline__ half sqrtFunc(half x) {
  return hsqrt(x);
}

template <typename T>
__device__ __forceinline__ T maxFunc(T x, T y) {
  return x > y ? x : y;
}

template <>
__device__ __forceinline__ half maxFunc(half x, half y) {
  return x > y ? x : y;
}

template <typename T>
__global__ void CalApplyAdamWithAmsgradKernel(const size_t size, const int64_t batch_size, T *var, T *m, T *v,
                                              T *vhat, T *beta1_power, T *beta2_power, const T *lr, const T *grad,
                                              const T beta1, const T beta2, const T epsilon, T *output_var,
                                              T *output_m, T *output_v, T *output_vhat) {
  auto all_elements = size * batch_size;
  const T one = static_cast<T>(1.0);
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < all_elements; pos += gridDim.x * blockDim.x) {
    auto batch = pos / size;
    auto new_learning_rate = lr[batch] * sqrtFunc(one - beta2_power[batch]) / (one - beta1_power[batch]);
    m[pos] += (grad[pos] - m[pos]) * (one - static_cast<T>(beta1));
    output_m[pos] = m[pos];
    v[pos] += (grad[pos] * grad[pos] - v[pos]) * (one - static_cast<T>(beta2));
    output_v[pos] = v[pos];
    vhat[pos] = maxFunc(vhat[pos], v[pos]);
    output_vhat[pos] = vhat[pos];
    var[pos] -= new_learning_rate * m[pos] / (sqrtFunc(vhat[pos]) + static_cast<T>(epsilon));
    output_var[pos] = var[pos];
  }
}

template <typename T>
cudaError_t CalApplyAdamWithAmsgrad(const size_t size, const int64_t batch_size, T *var, T *m, T *v, T *vhat,
                                    T *beta1_power, T *beta2_power, const T *lr, const T *grad, const T beta1,
                                    const T beta2, const T epsilon, T *output_var, T *output_m, T *output_v,
                                    T *output_vhat, const uint32_t &device_id, cudaStream_t stream_ptr) {
  CalApplyAdamWithAmsgradKernel<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, stream_ptr>>>(
    size, batch_size, var, m, v, vhat, beta1_power, beta2_power, lr, grad, beta1, beta2, epsilon, output_var,
    output_m, output_v, output_vhat);
  CHECK_CUDA_LAUNCH_SUCCESS();
}

template CUDA_LIB_EXPORT cudaError_t CalApplyAdamWithAmsgrad<double>(
  const size_t size, const int64_t batch_size, double *var, double *m, double *v, double *vhat, double *beta1_power,
  double *beta2_power, const double *lr, const double *grad, const double beta1, const double beta2,
  const double epsilon, double *output_var, double *output_m, double *output_v, double *output_vhat,
  const uint32_t &device_id, cudaStream_t stream_ptr);
template CUDA_LIB_EXPORT cudaError_t CalApplyAdamWithAmsgrad<float>(
  const size_t size, const int64_t batch_size, float *var, float *m, float *v, float *vhat, float *beta1_power,
  float *beta2_power, const float *lr, const float *grad, const float beta1, const float beta2, const float epsilon,
  float *output_var, float *output_m, float *output_v, float *output_vhat, const uint32_t &device_id,
  cudaStream_t stream_ptr);
template CUDA_LIB_EXPORT cudaError_t CalApplyAdamWithAmsgrad<half>(
  const size_t size, const int64_t batch_size, half *var, half *m, half *v, half *vhat, half *beta1_power,
  half *beta2_power, const half *lr, const half *grad, const half beta1, const half beta2, const half epsilon,
  half *output_var, half *output_m, half *output_v, half *output_vhat, const uint32_t &device_id,
  cudaStream_t stream_ptr);
b00c999fee4dbe3ed40ecbca630d2f75d232f2ae.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/hip/Exceptions.h> #include <cmath> #include <limits> #include <thrust/device_ptr.h> #include <thrust/sequence.h> namespace at { namespace native { template<typename T, typename accT = T> struct LinspaceOp { __host__ __device__ LinspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = step_ * static_cast<accT>(index); accT value = start_ + increment; return static_cast<T>(value); } const accT start_, step_; }; template<typename T, typename accT = T> struct LogspaceOp { __host__ __device__ LogspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = step_ * static_cast<accT>(index); accT base10 = 10; accT value = ::pow(base10, start_ + increment); return static_cast<T>(value); } const accT start_, step_; }; Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) { AT_CHECK(steps >= 0, "number of steps must be non-negative"); if (result.numel() != steps) { result.resize_({steps}); } Tensor r = result.is_contiguous() ? result : result.contiguous(); if (steps == 0) { // skip } else if (steps == 1) { r.fill_(start); } else { AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "linspace_cuda", [&]() { scalar_t scalar_start = start.to<scalar_t>(); scalar_t scalar_end = end.to<scalar_t>(); scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1); LinspaceOp<scalar_t> linspace_method(scalar_start, step); thrust::device_ptr<scalar_t> data_(r.data<scalar_t>()); thrust::tabulate(data_, data_ + steps, linspace_method); }); } if (!result.is_contiguous()) { result.copy_(r); } AT_CUDA_CHECK(hipGetLastError()); return result; } Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) { AT_CHECK(steps >= 0, "number of steps must be non-negative"); if (result.numel() != steps) { result.resize_({steps}); } Tensor r = result.is_contiguous() ? 
result : result.contiguous(); if (steps == 0) { // skip } else if (steps == 1) { r.fill_(::pow(10.0, start.to<double>())); } else { AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "logspace_cuda", [&]() { scalar_t scalar_start = start.to<scalar_t>(); scalar_t scalar_end = end.to<scalar_t>(); scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1); LogspaceOp<scalar_t> logspace_method(scalar_start, step); thrust::device_ptr<scalar_t> data_(r.data<scalar_t>()); thrust::tabulate(data_, data_ + steps, logspace_method); }); } if (!result.is_contiguous()) { result.copy_(r); } AT_CUDA_CHECK(hipGetLastError()); return result; } Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto xstart = start.to<accscalar_t>(); auto xend = end.to<accscalar_t>(); auto xstep = step.to<accscalar_t>(); AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero"); AT_CHECK(std::isfinite(static_cast<double>(xstart)) && std::isfinite(static_cast<double>(xend)), "unsupported range: ", xstart, " -> ", xend); AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)), "upper bound and larger bound inconsistent with step sign"); int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1); if (result.numel() != size) { result.resize_({size}); } Tensor r = result.is_contiguous() ? result : result.contiguous(); LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep); thrust::device_ptr<scalar_t> data_ptr(r.data<scalar_t>()); thrust::tabulate(data_ptr, data_ptr + size, linspace_method); if (!result.is_contiguous()) { result.copy_(r); } }); AT_CUDA_CHECK(hipGetLastError()); return result; } Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto xstart = start.to<accscalar_t>(); auto xend = end.to<accscalar_t>(); auto xstep = step.to<accscalar_t>(); // we use double precision for (start - end) / step // to compute size_d for consistency across devices. // The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t, // but double on cpu for the same, // and the effective output size starts differing on CPU vs GPU because of precision issues, which // we dont want. // the corner-case we do want to take into account is int64_t, which has higher precision than double double size_d; if (std::is_same<scalar_t, int64_t>::value) { size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>()) / step.to<accscalar_t>()); } else { size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>()) / step.to<double>()); } AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero"); AT_CHECK(std::isfinite(static_cast<double>(xstart)) && std::isfinite(static_cast<double>(xend)), "unsupported range: ", xstart, " -> ", xend); AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)), "upper bound and larger bound inconsistent with step sign"); AT_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()), "invalid size, possible overflow?"); int64_t size = static_cast<int64_t>(size_d); if (result.numel() != size) { result.resize_({size}); } Tensor r = result.is_contiguous() ? 
result : result.contiguous(); LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep); thrust::device_ptr<scalar_t> data_ptr(r.data<scalar_t>()); thrust::tabulate(data_ptr, data_ptr + size, linspace_method); if (!result.is_contiguous()) { result.copy_(r); } }); AT_CUDA_CHECK(hipGetLastError()); return result; } }} // namespace at::native
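/*
 * Illustrative sketch (not part of the original file): the element-count logic
 * that arange_cuda_out's comment describes, i.e. computing the size in double
 * precision so CPU and GPU builds agree, then checking that it fits in int64_t.
 * The helper name is hypothetical and only the floating-point path is shown.
 */
#include <cmath>
#include <cstdint>
#include <limits>
#include <stdexcept>

inline int64_t arange_size(double start, double end, double step) {
  if (step == 0.0) throw std::invalid_argument("step must be nonzero");
  const double size_d = std::ceil((end - start) / step);
  if (size_d < 0.0 || size_d > static_cast<double>(std::numeric_limits<int64_t>::max()))
    throw std::range_error("invalid size, possible overflow?");
  // e.g. arange_size(0.0, 1.0, 0.3) == 4, matching the values {0.0, 0.3, 0.6, 0.9}.
  return static_cast<int64_t>(size_d);
}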
b00c999fee4dbe3ed40ecbca630d2f75d232f2ae.cu
#include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/Exceptions.h> #include <cmath> #include <limits> #include <thrust/device_ptr.h> #include <thrust/sequence.h> namespace at { namespace native { template<typename T, typename accT = T> struct LinspaceOp { __host__ __device__ LinspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = step_ * static_cast<accT>(index); accT value = start_ + increment; return static_cast<T>(value); } const accT start_, step_; }; template<typename T, typename accT = T> struct LogspaceOp { __host__ __device__ LogspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = step_ * static_cast<accT>(index); accT base10 = 10; accT value = std::pow(base10, start_ + increment); return static_cast<T>(value); } const accT start_, step_; }; Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) { AT_CHECK(steps >= 0, "number of steps must be non-negative"); if (result.numel() != steps) { result.resize_({steps}); } Tensor r = result.is_contiguous() ? result : result.contiguous(); if (steps == 0) { // skip } else if (steps == 1) { r.fill_(start); } else { AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "linspace_cuda", [&]() { scalar_t scalar_start = start.to<scalar_t>(); scalar_t scalar_end = end.to<scalar_t>(); scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1); LinspaceOp<scalar_t> linspace_method(scalar_start, step); thrust::device_ptr<scalar_t> data_(r.data<scalar_t>()); thrust::tabulate(data_, data_ + steps, linspace_method); }); } if (!result.is_contiguous()) { result.copy_(r); } AT_CUDA_CHECK(cudaGetLastError()); return result; } Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) { AT_CHECK(steps >= 0, "number of steps must be non-negative"); if (result.numel() != steps) { result.resize_({steps}); } Tensor r = result.is_contiguous() ? 
result : result.contiguous(); if (steps == 0) { // skip } else if (steps == 1) { r.fill_(std::pow(10.0, start.to<double>())); } else { AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "logspace_cuda", [&]() { scalar_t scalar_start = start.to<scalar_t>(); scalar_t scalar_end = end.to<scalar_t>(); scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1); LogspaceOp<scalar_t> logspace_method(scalar_start, step); thrust::device_ptr<scalar_t> data_(r.data<scalar_t>()); thrust::tabulate(data_, data_ + steps, logspace_method); }); } if (!result.is_contiguous()) { result.copy_(r); } AT_CUDA_CHECK(cudaGetLastError()); return result; } Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto xstart = start.to<accscalar_t>(); auto xend = end.to<accscalar_t>(); auto xstep = step.to<accscalar_t>(); AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero"); AT_CHECK(std::isfinite(static_cast<double>(xstart)) && std::isfinite(static_cast<double>(xend)), "unsupported range: ", xstart, " -> ", xend); AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)), "upper bound and larger bound inconsistent with step sign"); int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1); if (result.numel() != size) { result.resize_({size}); } Tensor r = result.is_contiguous() ? result : result.contiguous(); LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep); thrust::device_ptr<scalar_t> data_ptr(r.data<scalar_t>()); thrust::tabulate(data_ptr, data_ptr + size, linspace_method); if (!result.is_contiguous()) { result.copy_(r); } }); AT_CUDA_CHECK(cudaGetLastError()); return result; } Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto xstart = start.to<accscalar_t>(); auto xend = end.to<accscalar_t>(); auto xstep = step.to<accscalar_t>(); // we use double precision for (start - end) / step // to compute size_d for consistency across devices. // The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t, // but double on cpu for the same, // and the effective output size starts differing on CPU vs GPU because of precision issues, which // we dont want. // the corner-case we do want to take into account is int64_t, which has higher precision than double double size_d; if (std::is_same<scalar_t, int64_t>::value) { size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>()) / step.to<accscalar_t>()); } else { size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>()) / step.to<double>()); } AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero"); AT_CHECK(std::isfinite(static_cast<double>(xstart)) && std::isfinite(static_cast<double>(xend)), "unsupported range: ", xstart, " -> ", xend); AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)), "upper bound and larger bound inconsistent with step sign"); AT_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()), "invalid size, possible overflow?"); int64_t size = static_cast<int64_t>(size_d); if (result.numel() != size) { result.resize_({size}); } Tensor r = result.is_contiguous() ? 
result : result.contiguous(); LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep); thrust::device_ptr<scalar_t> data_ptr(r.data<scalar_t>()); thrust::tabulate(data_ptr, data_ptr + size, linspace_method); if (!result.is_contiguous()) { result.copy_(r); } }); AT_CUDA_CHECK(cudaGetLastError()); return result; } }} // namespace at::native
a8a5f41857cc45234607e783e0c390351fce2a3b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* @author: lugao */
#include <cstdio>
#include <cmath>
#include <vector>
#include <string>
#include <ctime>
#include <sstream>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/random.h>
#include "matplotlibcpp.h"

namespace plt = matplotlibcpp;
using namespace std;

static chrono::time_point <chrono::high_resolution_clock> tstart;

static void tic() {
    tstart = chrono::high_resolution_clock::now();
}

static void toc() {
    auto tstop = chrono::high_resolution_clock::now();
    auto dt = 1.e-9 * std::chrono::duration_cast<chrono::nanoseconds>(tstop - tstart).count();
    printf("Elapsed time is %f seconds.\n", dt);
}

void cumsum2D(int *m, int v, int w, int h, int *out) {
    int W = w + 1;
    int H = h + 1;
    for (int x = 0; x < W; ++x) out[0 * W + x] = 0;
    for (int y = 0; y < H; ++y) out[y * W + 0] = 0;
    for (int y = 1; y < H; y++) {
        for (int x = 1; x < W; ++x) {
            out[y * W + x] = m[(y - 1) * w + (x - 1)] == v;
        }
    }
    for (int y = 0; y < H; y++) {
        for (int x = 1; x < W; ++x) {
            out[y * W + x] = out[y * W + x] + out[y * W + (x - 1)];
        }
    }
    for (int x = 0; x < W; ++x) {
        for (int y = 1; y < H; y++) {
            out[y * W + x] = out[(y - 1) * W + x] + out[y * W + x];
        }
    }
}

template<typename Numeric>
void show(Numeric *image, int w, int h, const string &map = "Greys") {
    plt::figure();
    plt::imshow(image, w, h, map);
    plt::xlim(0, w);
    plt::ylim(0, h);
    plt::show();
}

__global__ void paintSquare(int *a, int w, int h, int x, int y, int s, int v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < s * s) {
        int xx = i % s;
        int yy = i / s;
        a[(y + yy) * w + (x + xx)] = v;
    }
}

int appendOperations(vector <string> &operations, int *image, int w, int h, int x, int y, int s) {
    int painted = 0;
    stringstream ss;
    for (int xx = x; xx < x + s; ++xx) {
        for (int yy = y; yy < y + s; ++yy) {
            if (image[yy * w + xx] == 0) {
                ss << "ERASE," << xx << "," << yy;
                operations.push_back(ss.str());
                ss.str(string());
            } else if (image[yy * w + xx] == 1) {
                painted++;
            }
        }
    }
    if (painted != 0) {
        ss << "FILL," << x << "," << y << "," << s;
        operations.push_back(ss.str());
    }
    return painted;
}

__global__ void buildScoreMatrix(int *m0, int *m1, /*int *m2,*/ int w, int h, float *score, bool onlyFill) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < w * h) {
        int x = i % w;
        int y = i / w;
        int minD = min(w, h);
        float maxScore = -1;
        int maxS = 0;
        int maxCount0 = 0;
        //int maxCount1 = 0;
        for (int s = minD; s > 0; --s) {
            if (x + s <= w && y + s <= h) {
                int count0 = m0[y * (w + 1) + x] + m0[(y + s) * (w + 1) + (x + s)]
                             - m0[(y + s) * (w + 1) + x] - m0[y * (w + 1) + (x + s)];
                int count1 = m1[y * (w + 1) + x] + m1[(y + s) * (w + 1) + (x + s)]
                             - m1[(y + s) * (w + 1) + x] - m1[y * (w + 1) + (x + s)];
                float score = count1 / (1.0f + count0 * 0.390f);
                if (onlyFill) score = (1.0f * count1) / (1.0f + 10 * count0);
                if (maxScore < score || (maxScore == score && count0 < maxCount0)) {
                    maxScore = score;
                    maxS = s;
                    maxCount0 = count0;
                    //maxCount1 = count1;
                }
            }
        }
        score[y * w + x] = maxScore - 0.1f * maxCount0; //(1.0f * maxCount1) / (1.0f + 0.390f*maxCount0);
        if (onlyFill) score[y * w + x] = maxScore;
        score[(w * h) + y * w + x] = maxS;
    }
}

void greedySolution(int pixelsLeft, int *image, int w, int h, vector <string> &operations, int debugLevel = 1) {
    /* Debug levels
     * 0: no report nor image
     * 1: report every 1 second
     * 2: report and image every 1 second
     * 3: report and image every step */
    int totalPixels = pixelsLeft;
    int prediction = pixelsLeft;
    int *image_d, *m0_d, *m1_d; //, *m2_d
    float *score_d;
    int *m0 = (int *) malloc((w + 1) * (h + 1) * sizeof(int));
    int *m1 = (int *) malloc((w + 1) * (h + 1) * sizeof(int));
    float *score = (float *) malloc(2 * w * h * sizeof(float));

    hipMalloc(&m0_d, (w + 1) * (h + 1) * sizeof(int));
    hipMalloc(&m1_d, (w + 1) * (h + 1) * sizeof(int));
    hipMalloc(&image_d, w * h * sizeof(int));
    hipMalloc(&score_d, 2 * w * h * sizeof(float));

    tic();
    int minPred = w * h;
    int counter = 0;
    auto timer = chrono::high_resolution_clock::now();
    while (pixelsLeft > 0) {
        cumsum2D(image, 0, w, h, m0);
        cumsum2D(image, 1, w, h, m1);
        hipMemcpy(m0_d, m0, (w + 1) * (h + 1) * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(m1_d, m1, (w + 1) * (h + 1) * sizeof(int), hipMemcpyHostToDevice);

        //-----------------------------------CUDA-----------------------------------
        buildScoreMatrix << < (w * h + 1023) / 1024, 1024 >> > (m0_d, m1_d, w, h, score_d, prediction <= 3050);
        hipMemcpy(score, score_d, 2 * w * h * sizeof(float), hipMemcpyDeviceToHost);
        //-----------------------------------CUDA-----------------------------------

        thrust::host_vector<float> h_vec(score, score + w * h);
        thrust::device_vector<float> d_vec = h_vec;
        thrust::device_vector<float>::iterator iter = thrust::max_element(d_vec.begin(), d_vec.end());
        unsigned int position = iter - d_vec.begin();
        float max_val = *iter;
        int sqX = position % w;
        int sqY = position / w;
        int sqS = (int) score[w * h + position];
        if (debugLevel >= 3) printf("Best square at (%d,%d) with size %d\n", sqX, sqY, sqS);

        pixelsLeft -= appendOperations(operations, image, w, h, sqX, sqY, sqS);
        hipMemcpy(image_d, image, w * h * sizeof(int), hipMemcpyHostToDevice);

        //-----------------------------------CUDA-----------------------------------
        paintSquare << < (sqS * sqS + 1023) / 1024, 1024 >> > (image_d, w, h, sqX, sqY, sqS, 2);
        hipMemcpy(image, image_d, w * h * sizeof(int), hipMemcpyDeviceToHost);
        //-----------------------------------CUDA-----------------------------------

        h_vec.clear();
        thrust::host_vector<float>().swap(h_vec);
        d_vec.clear();
        thrust::device_vector<float>().swap(d_vec);

        int totalOperations = (int) operations.size();
        prediction = totalOperations + pixelsLeft;
        minPred = min(prediction, minPred);

        auto now = chrono::high_resolution_clock::now();
        auto dt = 1.e-9 * std::chrono::duration_cast<chrono::nanoseconds>(now - timer).count();
        if (pixelsLeft <= 0 || (counter % 10 == 0 && prediction < 3500) || (debugLevel <= 2 && dt > 1)) {
            printf("[%6.2f%%] Total operations: %4d | Pixels left: %5d | Prediction <= %5d | MinPred = %5d \n",
                   (totalPixels - pixelsLeft) * 100.0f / totalPixels, totalOperations, pixelsLeft, prediction, minPred);
            if (debugLevel == 2) show(image, w, h);
            if (debugLevel <= 2) timer = chrono::high_resolution_clock::now();
        }
        counter++;
        if (debugLevel >= 3) show(image, w, h);
    }
    toc();

    free(m0);
    free(m1);
    free(image);
    free(score);
    hipFree(m0_d);
    hipFree(m1_d);
    hipFree(image_d);
    hipFree(score_d);
}

int readMatrix(int *a, int w, int h) {
    int totalPixels = 0;
    for (int y = 0; y < h; y++) {
        char buffer[w + 10];
        if (scanf("%s", buffer) == 0) printf("Failed to read line %d", y);
        for (int x = 0; x < w; ++x) {
            a[y * w + x] = buffer[x] == '#';
            totalPixels += buffer[x] == '#';
        }
    }
    return totalPixels;
}

int main(void) {
    //Redirect input file to stdin
    if (freopen("../input_0.txt", "r", stdin) == nullptr) printf("Failed redirecting input file to stdin.");

    //Read the matrix dimensions
    int w, h;
    if (scanf("%d,%d", &w, &h) != 2) printf("Failed reading matrix dimensions.");

    //Allocate image matrix and operations vector
    int *image = (int *) malloc(w * h * sizeof(int));
    vector <string> operations;

    //Read the matrix from the file
    int pixelsLeft = readMatrix(image, w, h);

    //Shows the image using python's matplotlib
    show(image, w, h);

    //Execute the greedy solution
    greedySolution(pixelsLeft, image, w, h, operations);

    if (system("python ../src/verificationTool.py") != 0) printf("Verification failed!");

    //Write the solution to the file in a reversed order
    printf("Writing solution...");
    if (freopen("../output_0.txt", "w", stdout) == nullptr) printf("Failed redirecting stdout to output file.");
    for (auto it = operations.rbegin(); it < operations.rend(); it++) printf("%s\n", it->c_str());
    fclose(stdout);
}
a8a5f41857cc45234607e783e0c390351fce2a3b.cu
/* @author: lugao */
#include <cstdio>
#include <cmath>
#include <vector>
#include <string>
#include <ctime>
#include <sstream>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/random.h>
#include "matplotlibcpp.h"

namespace plt = matplotlibcpp;
using namespace std;

static chrono::time_point <chrono::high_resolution_clock> tstart;

static void tic() {
    tstart = chrono::high_resolution_clock::now();
}

static void toc() {
    auto tstop = chrono::high_resolution_clock::now();
    auto dt = 1.e-9 * std::chrono::duration_cast<chrono::nanoseconds>(tstop - tstart).count();
    printf("Elapsed time is %f seconds.\n", dt);
}

void cumsum2D(int *m, int v, int w, int h, int *out) {
    int W = w + 1;
    int H = h + 1;
    for (int x = 0; x < W; ++x) out[0 * W + x] = 0;
    for (int y = 0; y < H; ++y) out[y * W + 0] = 0;
    for (int y = 1; y < H; y++) {
        for (int x = 1; x < W; ++x) {
            out[y * W + x] = m[(y - 1) * w + (x - 1)] == v;
        }
    }
    for (int y = 0; y < H; y++) {
        for (int x = 1; x < W; ++x) {
            out[y * W + x] = out[y * W + x] + out[y * W + (x - 1)];
        }
    }
    for (int x = 0; x < W; ++x) {
        for (int y = 1; y < H; y++) {
            out[y * W + x] = out[(y - 1) * W + x] + out[y * W + x];
        }
    }
}

template<typename Numeric>
void show(Numeric *image, int w, int h, const string &map = "Greys") {
    plt::figure();
    plt::imshow(image, w, h, map);
    plt::xlim(0, w);
    plt::ylim(0, h);
    plt::show();
}

__global__ void paintSquare(int *a, int w, int h, int x, int y, int s, int v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < s * s) {
        int xx = i % s;
        int yy = i / s;
        a[(y + yy) * w + (x + xx)] = v;
    }
}

int appendOperations(vector <string> &operations, int *image, int w, int h, int x, int y, int s) {
    int painted = 0;
    stringstream ss;
    for (int xx = x; xx < x + s; ++xx) {
        for (int yy = y; yy < y + s; ++yy) {
            if (image[yy * w + xx] == 0) {
                ss << "ERASE," << xx << "," << yy;
                operations.push_back(ss.str());
                ss.str(string());
            } else if (image[yy * w + xx] == 1) {
                painted++;
            }
        }
    }
    if (painted != 0) {
        ss << "FILL," << x << "," << y << "," << s;
        operations.push_back(ss.str());
    }
    return painted;
}

__global__ void buildScoreMatrix(int *m0, int *m1, /*int *m2,*/ int w, int h, float *score, bool onlyFill) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < w * h) {
        int x = i % w;
        int y = i / w;
        int minD = min(w, h);
        float maxScore = -1;
        int maxS = 0;
        int maxCount0 = 0;
        //int maxCount1 = 0;
        for (int s = minD; s > 0; --s) {
            if (x + s <= w && y + s <= h) {
                int count0 = m0[y * (w + 1) + x] + m0[(y + s) * (w + 1) + (x + s)]
                             - m0[(y + s) * (w + 1) + x] - m0[y * (w + 1) + (x + s)];
                int count1 = m1[y * (w + 1) + x] + m1[(y + s) * (w + 1) + (x + s)]
                             - m1[(y + s) * (w + 1) + x] - m1[y * (w + 1) + (x + s)];
                float score = count1 / (1.0f + count0 * 0.390f);
                if (onlyFill) score = (1.0f * count1) / (1.0f + 10 * count0);
                if (maxScore < score || (maxScore == score && count0 < maxCount0)) {
                    maxScore = score;
                    maxS = s;
                    maxCount0 = count0;
                    //maxCount1 = count1;
                }
            }
        }
        score[y * w + x] = maxScore - 0.1f * maxCount0; //(1.0f * maxCount1) / (1.0f + 0.390f*maxCount0);
        if (onlyFill) score[y * w + x] = maxScore;
        score[(w * h) + y * w + x] = maxS;
    }
}

void greedySolution(int pixelsLeft, int *image, int w, int h, vector <string> &operations, int debugLevel = 1) {
    /* Debug levels
     * 0: no report nor image
     * 1: report every 1 second
     * 2: report and image every 1 second
     * 3: report and image every step */
    int totalPixels = pixelsLeft;
    int prediction = pixelsLeft;
    int *image_d, *m0_d, *m1_d; //, *m2_d
    float *score_d;
    int *m0 = (int *) malloc((w + 1) * (h + 1) * sizeof(int));
    int *m1 = (int *) malloc((w + 1) * (h + 1) * sizeof(int));
    float *score = (float *) malloc(2 * w * h * sizeof(float));

    cudaMalloc(&m0_d, (w + 1) * (h + 1) * sizeof(int));
    cudaMalloc(&m1_d, (w + 1) * (h + 1) * sizeof(int));
    cudaMalloc(&image_d, w * h * sizeof(int));
    cudaMalloc(&score_d, 2 * w * h * sizeof(float));

    tic();
    int minPred = w * h;
    int counter = 0;
    auto timer = chrono::high_resolution_clock::now();
    while (pixelsLeft > 0) {
        cumsum2D(image, 0, w, h, m0);
        cumsum2D(image, 1, w, h, m1);
        cudaMemcpy(m0_d, m0, (w + 1) * (h + 1) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(m1_d, m1, (w + 1) * (h + 1) * sizeof(int), cudaMemcpyHostToDevice);

        //-----------------------------------CUDA-----------------------------------
        buildScoreMatrix << < (w * h + 1023) / 1024, 1024 >> > (m0_d, m1_d, w, h, score_d, prediction <= 3050);
        cudaMemcpy(score, score_d, 2 * w * h * sizeof(float), cudaMemcpyDeviceToHost);
        //-----------------------------------CUDA-----------------------------------

        thrust::host_vector<float> h_vec(score, score + w * h);
        thrust::device_vector<float> d_vec = h_vec;
        thrust::device_vector<float>::iterator iter = thrust::max_element(d_vec.begin(), d_vec.end());
        unsigned int position = iter - d_vec.begin();
        float max_val = *iter;
        int sqX = position % w;
        int sqY = position / w;
        int sqS = (int) score[w * h + position];
        if (debugLevel >= 3) printf("Best square at (%d,%d) with size %d\n", sqX, sqY, sqS);

        pixelsLeft -= appendOperations(operations, image, w, h, sqX, sqY, sqS);
        cudaMemcpy(image_d, image, w * h * sizeof(int), cudaMemcpyHostToDevice);

        //-----------------------------------CUDA-----------------------------------
        paintSquare << < (sqS * sqS + 1023) / 1024, 1024 >> > (image_d, w, h, sqX, sqY, sqS, 2);
        cudaMemcpy(image, image_d, w * h * sizeof(int), cudaMemcpyDeviceToHost);
        //-----------------------------------CUDA-----------------------------------

        h_vec.clear();
        thrust::host_vector<float>().swap(h_vec);
        d_vec.clear();
        thrust::device_vector<float>().swap(d_vec);

        int totalOperations = (int) operations.size();
        prediction = totalOperations + pixelsLeft;
        minPred = min(prediction, minPred);

        auto now = chrono::high_resolution_clock::now();
        auto dt = 1.e-9 * std::chrono::duration_cast<chrono::nanoseconds>(now - timer).count();
        if (pixelsLeft <= 0 || (counter % 10 == 0 && prediction < 3500) || (debugLevel <= 2 && dt > 1)) {
            printf("[%6.2f%%] Total operations: %4d | Pixels left: %5d | Prediction <= %5d | MinPred = %5d \n",
                   (totalPixels - pixelsLeft) * 100.0f / totalPixels, totalOperations, pixelsLeft, prediction, minPred);
            if (debugLevel == 2) show(image, w, h);
            if (debugLevel <= 2) timer = chrono::high_resolution_clock::now();
        }
        counter++;
        if (debugLevel >= 3) show(image, w, h);
    }
    toc();

    free(m0);
    free(m1);
    free(image);
    free(score);
    cudaFree(m0_d);
    cudaFree(m1_d);
    cudaFree(image_d);
    cudaFree(score_d);
}

int readMatrix(int *a, int w, int h) {
    int totalPixels = 0;
    for (int y = 0; y < h; y++) {
        char buffer[w + 10];
        if (scanf("%s", buffer) == 0) printf("Failed to read line %d", y);
        for (int x = 0; x < w; ++x) {
            a[y * w + x] = buffer[x] == '#';
            totalPixels += buffer[x] == '#';
        }
    }
    return totalPixels;
}

int main(void) {
    //Redirect input file to stdin
    if (freopen("../input_0.txt", "r", stdin) == nullptr) printf("Failed redirecting input file to stdin.");

    //Read the matrix dimensions
    int w, h;
    if (scanf("%d,%d", &w, &h) != 2) printf("Failed reading matrix dimensions.");

    //Allocate image matrix and operations vector
    int *image = (int *) malloc(w * h * sizeof(int));
    vector <string> operations;

    //Read the matrix from the file
    int pixelsLeft = readMatrix(image, w, h);

    //Shows the image using python's matplotlib
    show(image, w, h);

    //Execute the greedy solution
    greedySolution(pixelsLeft, image, w, h, operations);

    if (system("python ../src/verificationTool.py") != 0) printf("Verification failed!");

    //Write the solution to the file in a reversed order
    printf("Writing solution...");
    if (freopen("../output_0.txt", "w", stdout) == nullptr) printf("Failed redirecting stdout to output file.");
    for (auto it = operations.rbegin(); it < operations.rend(); it++) printf("%s\n", it->c_str());
    fclose(stdout);
}
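The pair above gets its O(1) square scoring from the inclusive summed-area table built by cumsum2D: buildScoreMatrix counts the 0- and 1-pixels inside any s-by-s square with four table lookups. Below is a minimal host-side sketch of that lookup, not part of either file; the helper name countInSquare and the sample image are illustrative only.

#include <cstdio>
#include <vector>

// Inclusive summed-area table with an extra zero row/column, mirroring cumsum2D:
// sat is (h+1) x (w+1) and sat[y * W + x] counts matches in the rectangle of rows [0, y) and columns [0, x).
int countInSquare(const std::vector<int> &sat, int W /* = w + 1 */, int x, int y, int s) {
    // Matching pixels inside the s-by-s square whose top-left corner is (x, y) -- the
    // same four-corner expression used for count0/count1 in buildScoreMatrix.
    return sat[y * W + x] + sat[(y + s) * W + (x + s)]
         - sat[(y + s) * W + x] - sat[y * W + (x + s)];
}

int main() {
    const int w = 4, h = 3, W = w + 1, H = h + 1;
    // 1 marks a "set" pixel, as produced by readMatrix for '#'.
    int image[h * w] = {1, 0, 1, 1,
                        0, 1, 1, 0,
                        1, 1, 0, 1};
    // Build the same table cumsum2D builds for value v == 1 (single-pass recurrence here).
    std::vector<int> sat(W * H, 0);
    for (int y = 1; y < H; ++y)
        for (int x = 1; x < W; ++x)
            sat[y * W + x] = (image[(y - 1) * w + (x - 1)] == 1)
                           + sat[(y - 1) * W + x] + sat[y * W + (x - 1)]
                           - sat[(y - 1) * W + (x - 1)];
    printf("ones in the 2x2 square at (1,0): %d\n", countInSquare(sat, W, 1, 0, 2)); // expect 3
    return 0;
}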
e49d64b488193c9188252539967db64291611c59.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <stdio.h>

#define BLOCKS_COUNT (2048 * 2048)
#define THREADS_PER_BLOCK 512

__global__ void add(int *numberOne, int *numberTwo, int *addition, int count)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < count)
    {
        addition[index] = numberOne[index] + numberTwo[index];
    }
}

void random_ints(int* dest, int count)
{
    int counter = 0;
    for (counter = 0; counter < count; ++counter)
    {
        dest[counter] = rand();
    }
}

int main(void)
{
    // The host variables.
    int *host_numberOne, *host_numberTwo, *host_addition;
    // The device variables.
    int *device_numberOne, *device_numberTwo, *device_addition;
    // Size of variable per block
    int size = BLOCKS_COUNT * sizeof(int);
    // int counter = 0;

    // Allocate memory on device for device variables.
    hipMalloc((void **)&device_numberOne, size);
    hipMalloc((void **)&device_numberTwo, size);
    hipMalloc((void **)&device_addition, size);

    // Initialize host variables;
    host_numberOne = (int *)malloc(size);
    random_ints(host_numberOne, BLOCKS_COUNT);
    host_numberTwo = (int *)malloc(size);
    random_ints(host_numberTwo, BLOCKS_COUNT);
    host_addition = (int *)malloc(size);

    // Copy host variables to device memory.
    hipMemcpy(device_numberOne, host_numberOne, size, hipMemcpyHostToDevice);
    hipMemcpy(device_numberTwo, host_numberTwo, size, hipMemcpyHostToDevice);

    // Invoke add kernel.
    hipLaunchKernelGGL(( add), dim3((BLOCKS_COUNT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, device_numberOne, device_numberTwo, device_addition, BLOCKS_COUNT);

    // Copy device variable to host memory.
    hipMemcpy(host_addition, device_addition, size, hipMemcpyDeviceToHost);

    // for(counter = 0; counter < BLOCKS_COUNT; ++counter)
    // {
    //     printf("%5d + %5d = %5d\n", host_numberOne[counter], host_numberTwo[counter], host_addition[counter]);
    // }

    // Clean up, free all device allocated memory.
    free(host_numberOne);
    free(host_numberTwo);
    free(host_addition);
    hipFree(device_numberOne);
    hipFree(device_numberTwo);
    hipFree(device_addition);

    return 0;
}
e49d64b488193c9188252539967db64291611c59.cu
// #include <stdio.h>

#define BLOCKS_COUNT (2048 * 2048)
#define THREADS_PER_BLOCK 512

__global__ void add(int *numberOne, int *numberTwo, int *addition, int count)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < count)
    {
        addition[index] = numberOne[index] + numberTwo[index];
    }
}

void random_ints(int* dest, int count)
{
    int counter = 0;
    for (counter = 0; counter < count; ++counter)
    {
        dest[counter] = rand();
    }
}

int main(void)
{
    // The host variables.
    int *host_numberOne, *host_numberTwo, *host_addition;
    // The device variables.
    int *device_numberOne, *device_numberTwo, *device_addition;
    // Size of variable per block
    int size = BLOCKS_COUNT * sizeof(int);
    // int counter = 0;

    // Allocate memory on device for device variables.
    cudaMalloc((void **)&device_numberOne, size);
    cudaMalloc((void **)&device_numberTwo, size);
    cudaMalloc((void **)&device_addition, size);

    // Initialize host variables;
    host_numberOne = (int *)malloc(size);
    random_ints(host_numberOne, BLOCKS_COUNT);
    host_numberTwo = (int *)malloc(size);
    random_ints(host_numberTwo, BLOCKS_COUNT);
    host_addition = (int *)malloc(size);

    // Copy host variables to device memory.
    cudaMemcpy(device_numberOne, host_numberOne, size, cudaMemcpyHostToDevice);
    cudaMemcpy(device_numberTwo, host_numberTwo, size, cudaMemcpyHostToDevice);

    // Invoke add kernel.
    add<<<(BLOCKS_COUNT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(device_numberOne, device_numberTwo, device_addition, BLOCKS_COUNT);

    // Copy device variable to host memory.
    cudaMemcpy(host_addition, device_addition, size, cudaMemcpyDeviceToHost);

    // for(counter = 0; counter < BLOCKS_COUNT; ++counter)
    // {
    //     printf("%5d + %5d = %5d\n", host_numberOne[counter], host_numberTwo[counter], host_addition[counter]);
    // }

    // Clean up, free all device allocated memory.
    free(host_numberOne);
    free(host_numberTwo);
    free(host_addition);
    cudaFree(device_numberOne);
    cudaFree(device_numberTwo);
    cudaFree(device_addition);

    return 0;
}
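Both versions above launch with the usual ceil-division (BLOCKS_COUNT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK and guard each thread with an index check. The sketch below, which is not taken from either file and uses illustrative names and managed memory purely to keep it short, shows the equivalent grid-stride formulation: the same bounds logic, but each thread strides across the array, so the launch no longer has to supply one thread per element.

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride variant of the add kernel: each thread handles indices
// i, i + stride, i + 2*stride, ... where stride is the total thread count.
__global__ void add_grid_stride(const int *a, const int *b, int *c, int count)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
    {
        c[i] = a[i] + b[i];
    }
}

int main(void)
{
    const int count = 1 << 20;
    const size_t size = count * sizeof(int);

    int *a, *b, *c;
    // Managed memory keeps the sketch short; the files above use explicit host/device copies instead.
    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);
    for (int i = 0; i < count; ++i) { a[i] = i; b[i] = 2 * i; }

    const int threadsPerBlock = 512;
    // Ceil-division: enough blocks to cover count elements, identical to the launch math above.
    const int blocks = (count + threadsPerBlock - 1) / threadsPerBlock;
    add_grid_stride<<<blocks, threadsPerBlock>>>(a, b, c, count);
    cudaDeviceSynchronize();

    printf("c[123] = %d (expected %d)\n", c[123], 3 * 123);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}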
6ff165b5139a217359e5517c3e39e90ef61c0e95.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted
 * provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright notice, this list of
 *       conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright notice, this list of
 *       conditions and the following disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
 *       to endorse or promote products derived from this software without specific prior written
 *       permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *//*
 */

/** @file   fully_fused_mlp.cu
 *  @author Thomas Müller and Nikolaus Binder, NVIDIA
 *  @brief  Fully fused CUDA implementation of a multi-layer perceptron. Supports online training
 *          and simultaneous inference.
 */

#include <tiny-cuda-nn/networks/fully_fused_mlp.h>
#include <tiny-cuda-nn/cutlass_matmul.h>
#include <tiny-cuda-nn/misc_kernels.h>

#include <mma.h>
#include <cuda_pipeline.h>

TCNN_NAMESPACE_BEGIN

static constexpr float K_ACT = 10.0f;

void check_shmem_error(hipError_t error) {
	if (error != hipSuccess) {
		throw std::runtime_error{"FullyFusedMLP: insufficient shared memory available on the GPU. 
Reduce `n_neurons` or use `CutlassMLP` (better compatibility but slower) instead."}; } } template <typename fragment_t> __device__ void warp_activation(Activation activation, fragment_t& frag) { switch (activation) { case Activation::None: return; case Activation::ReLU: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(frag.x[t] > (__half)0.0f); } return; case Activation::Exponential: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__expf(frag.x[t])); } return; case Activation::Sigmoid: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(logistic(frag.x[t])); } return; case Activation::Sine: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__sinf(frag.x[t])); } return; case Activation::Squareplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { float x = (float)frag.x[t] * K_ACT; frag.x[t] = (__half)(0.5f * (x + sqrtf(x * x + 4)) / K_ACT); } return; case Activation::Softplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__logf(__expf((float)frag.x[t] * K_ACT) + 1.0f) / K_ACT); } return; default: // Unsupported activation // assert(false); // Commented out due to isolated strange side-effects on Windows return; } } template <typename fragment_t, typename forward_fragment_t> __device__ void warp_activation_backward(Activation activation, fragment_t& frag, const forward_fragment_t& forward_frag) { switch (activation) { case Activation::None: return; case Activation::ReLU: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(forward_frag.x[t] > (__half)0.0f); } return; case Activation::Exponential: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= forward_frag.x[t]; } return; case Activation::Sigmoid: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(forward_frag.x[t] * ((__half)1.0f - forward_frag.x[t])); } return; case Activation::Sine: // Sine requires stored pre-activations, which we don't have. We only // write out the post-activations. // assert(false); // Commented out due to isolated strange side-effects on Windows return; case Activation::Squareplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { float y = (float)forward_frag.x[t] * K_ACT; frag.x[t] *= (__half)(y * y / (y * y + 1)); } return; case Activation::Softplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(1.0f - __expf(-(float)forward_frag.x[t] * K_ACT)); } return; default: // Unsupported activation // assert(false); // Commented out due to isolated strange side-effects on Windows return; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T, bool BACKWARD=false> __device__ void threadblock_layer(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out_intermediate_threadblock_this_layer, const OUT_T* __restrict__ activation_aux = nullptr) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch. // Can be forward activations or backward activations, depending on caller. // weights_this_layer points to the weight matrix of the current layer. // out_intermediate_threadblock_this_layer points to the location where intermediate activations produced by the thread block should be written to. // Can be nullptr if nothing should be written. 
// activation_aux points to additional arguments that the activation function may depend on. Points to the hidden forward activations when computing backward activations. constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // If we're performing the backward pass, weights must be loaded in transposed form, which // is achieved by interpreting the memory in row_major instead of col_major order. using weights_layout_t = std::conditional_t<BACKWARD, wmma::row_major, wmma::col_major>; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, weights_layout_t> weights_frag[N_BLOCKS]; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag[N_ITERS]; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; const uint32_t weights_col = 16 * wi; __syncthreads(); // Load N_BLOCKS chunks of weights from global memory into registers. #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { if (BACKWARD) { // If we're performing the backward pass, additional index swizzling is needed to // load the weights in transposed form. wmma::load_matrix_sync(weights_frag[i], weights_this_layer + 16 * i * WIDTH + weights_col, WIDTH); } else { wmma::load_matrix_sync(weights_frag[i], weights_this_layer + 16 * i + weights_col * WIDTH, WIDTH); } } #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::fill_fragment(result_frag[l], 0.0f); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { // Load a chunk of intermediate activations from shared memory and multiply with chunk of weights wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), WIDTH + SKEW); wmma::mma_sync(result_frag[l], act_frag, weights_frag[i], result_frag[l]); } // Activation if (BACKWARD) { // Load the temporary forward matrix for the relu transfer wmma::load_matrix_sync(act_frag, activation_aux + weights_col + (threadIdx.z + l * BLOCK_DIM_Z) * 16 * WIDTH, WIDTH); warp_activation_backward(activation, result_frag[l], act_frag); } else { warp_activation(activation, result_frag[l]); } } __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (threadIdx.z + l * BLOCK_DIM_Z) * 16 * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } if (out_intermediate_threadblock_this_layer != nullptr) { __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { *(int4*)&out_intermediate_threadblock_this_layer[lane_offset + (row + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS> __device__ void threadblock_load_input_static(__half* __restrict__ act_shmem, const __half* __restrict__ input_threadblock) { // act_shmem will be filled by the thread block's chunk of input_threadblock constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 
8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)] = *(int4*)&input_threadblock[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH]; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, Activation ACTIVATION, typename OUTPUT_LAYOUT> __global__ void kernel_mlp_fused_backward(const __half* __restrict__ dL_doutput, const __half* __restrict__ weights, __half* __restrict__ out_intermediate, const __half* __restrict__ forward, __half* __restrict__ dL_dinput, const __half* __restrict__ weights_first_layer, const uint32_t batch_size, const uint32_t out_width, const uint32_t n_hidden_matmuls) { // `dL_doutput` points to the input matrix of the backward pass, i.e. the loss gradients. Assumed to be 16 neurons wide. // `weights` points to the weight matrices (contiguous in memory). // `out_intermediate` points to the memory where backpropagated activation gradients should be written. // `forward` points to the memory where the intermediate activations of the forward pass are located. (needed for activation backprop) constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t bi = blockIdx.x; // block index // Shared memory contains the intermediate activations of blockDim.y*16 elements. // A skew is applied to the matrix storage to avoid bank conflicts. extern __shared__ __half shmem[]; __half* act_shmem = shmem; const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; // Multipying one 16-row chunk of intermediate activations with the weight matrix requires all warps of the block. // Thus, each block computes exactly one 16-row chunk of the next layer's intermediate activations. 
const uint32_t elem_idx_base = 16 * bi * N_ITERS * BLOCK_DIM_Z; const uint32_t elem_idx = elem_idx_base + 16 * threadIdx.z; const uint32_t layer_stride = WIDTH * WIDTH; const uint32_t output_stride = WIDTH * batch_size; // Backprop through last layer if (out_width <= 16) { using namespace nvcuda; using namespace nvcuda::experimental; // Fragments in registers wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, OUTPUT_LAYOUT> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::row_major> weights_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, __half> result_frag[N_ITERS]; // Load the relevant chunk of the last layer's weight matrix from global memory into registers const uint32_t weights_col = 16 * wi; wmma::load_matrix_sync(weights_frag, weights + layer_stride * n_hidden_matmuls + weights_col, WIDTH); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::fill_fragment(result_frag[l], 0.0f); // Load a chunk of output gradients from shared memory and multiply with previously loaded weights if (std::is_same<OUTPUT_LAYOUT, wmma::row_major>::value) { wmma::load_matrix_sync(act_frag, dL_doutput + (elem_idx + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * 16, 16); } else { wmma::load_matrix_sync(act_frag, dL_doutput + (elem_idx + 16 * (threadIdx.z + l * BLOCK_DIM_Z)), batch_size); } // NOTE: activation transfer of the _output_ activation is expected to be done _prior_ to calling this kernel // in a separate pass, because the tranfered activation gradient is also needed to compute the weight // gradient of the last weight matrix (see backward()). wmma::mma_sync(result_frag[l], act_frag, weights_frag, result_frag[l]); // Load the temporary forward matrix for the relu transfer wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> forward_frag; wmma::load_matrix_sync(forward_frag, forward + output_stride * n_hidden_matmuls + weights_col + (elem_idx + l * BLOCK_DIM_Z * 16) * WIDTH, WIDTH); warp_activation_backward(ACTIVATION, result_frag[l], forward_frag); } __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&out_intermediate[lane_offset + (row + elem_idx + i * BLOCK_DIM_Z * 16) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } else { // If the output width is larger than 16, we will have used CUTLASS for backpropping through the last layer. // Load the resulting gradients. threadblock_load_input_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, out_intermediate + elem_idx * WIDTH); } // Backprop through hidden layers for (uint32_t k = 0; k < n_hidden_matmuls; ++k) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, true>(ACTIVATION, act_shmem, weights + layer_stride * (n_hidden_matmuls - k - 1), out_intermediate + output_stride * (k + 1) + elem_idx_base * WIDTH, forward + output_stride * (n_hidden_matmuls - k - 1) + elem_idx_base * WIDTH); } // Compute loss gradients w.r.t. input if desired. // THIS CODE ASSUMES THAT THE INPUT WIDTH IS THE SAME AS THE NETWORK WIDTH. // DON'T PASS A NON-NULL dL_dinput IF THIS REQUIREMENT IS NOT MET. 
if (dL_dinput != nullptr) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, true>(Activation::None, act_shmem, weights_first_layer, dL_dinput + elem_idx_base * WIDTH); } } template <int WIDTH, typename T, Activation ACTIVATION> void mlp_fused_backward( hipStream_t stream, const GPUMatrix<T, RM>& weights_first_layer, const GPUMatrix<T, RM>& weights, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>& temporaries, const GPUMatrix<T>& forward, GPUMatrix<T>* dL_dinput, const uint32_t n_hidden_matmuls ) { if constexpr (std::is_same<T, __half>::value) { const uint32_t batch_size = dL_doutput.cols(); const uint32_t out_width = dL_doutput.rows(); constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; if (forward.cols() != batch_size) { throw std::runtime_error{"Batch size of matrices dL_doutput and temporaries doesn't match."}; } const int N_ITERS = WIDTH >= 256 ? 2 : 8; const uint32_t BLOCK_DIM_Z = 1; if (batch_size % (16 * N_ITERS * BLOCK_DIM_Z) != 0) { throw std::runtime_error{"Batch size must be a multiple of " + std::to_string(16 * N_ITERS * BLOCK_DIM_Z) + "."}; } const dim3 threads = { 32u, N_BLOCKS, BLOCK_DIM_Z }; // 32 threads = 1 warp, 8 warps per block for 16 rows, up to 2x 8 warps can share input (does not help vs. 1) uint32_t n_elems_per_block = 16 * BLOCK_DIM_Z * N_ITERS; uint32_t n_blocks = div_round_up(batch_size, n_elems_per_block); int shmem_size = sizeof(__half) * ((16 * BLOCK_DIM_Z * N_ITERS) * (WIDTH + SKEW)); // WIDTH rows of input and 16 * threads.z rows of weights const dim3 blocks = { n_blocks, 1u, 1u }; // The kernels operate with transposed layouts compared with the MLP code if (dL_doutput.layout() == RM) { check_shmem_error(hipFuncSetAttribute(kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::col_major>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem_size)); hipLaunchKernelGGL(( kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::col_major>), dim3(blocks), dim3(threads), shmem_size, stream, dL_doutput.data(), weights.data(), temporaries.data(), forward.data(), dL_dinput ? dL_dinput->data() : nullptr, weights_first_layer.data(), batch_size, out_width, n_hidden_matmuls); } else { check_shmem_error(hipFuncSetAttribute(kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::row_major>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem_size)); hipLaunchKernelGGL(( kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::row_major>), dim3(blocks), dim3(threads), shmem_size, stream, dL_doutput.data(), weights.data(), temporaries.data(), forward.data(), dL_dinput ? 
dL_dinput->data() : nullptr, weights_first_layer.data(), batch_size, out_width, n_hidden_matmuls); } } else { throw std::runtime_error{"The fully fused backward pass only supports __half precision."}; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T> __device__ void threadblock_input_layer_forward_dynamic(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ input_threadblock, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out_intermediate_threadblock_this_layer, const uint32_t in_width) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch // input_threadblock points to the thread block's chunk of the input batch in global memory // weights_this_layer points to the weight matrix of the current layer // out_intermediate_threadblock_this_layer points to the location where intermediate activations produced by the thread block should be written to. // Can be nullptr if nothing should be written. // in_width is the dynamic width of the input layer constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t INPUT_SKEW = 8; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::col_major> weights_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag[N_ITERS]; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; const uint32_t weights_col = 16 * wi; __half* __restrict__ weights_shmem = act_shmem + BLOCK_DIM_Z * 16 * (in_width + INPUT_SKEW); // Load input weight matrix (fits completely into shared memory) // Each thread can load 8 fp16 elements (16 bytes) at once; we have N_BLOCKS*BLOCK_DIM_Z warps const uint32_t n_elems_per_load = N_BLOCKS * 32 * BLOCK_DIM_Z * 8; const uint32_t thread_elem_idx = (li + wi * 32 + threadIdx.z * N_BLOCKS * 32) * 8; const uint32_t n_elems_b = WIDTH * in_width; #pragma unroll for (uint32_t idx = thread_elem_idx; idx < n_elems_b; idx += n_elems_per_load) { const uint32_t idx_skewed = idx + idx / in_width * INPUT_SKEW; *(int4*)&weights_shmem[idx_skewed] = *(int4*)&weights_this_layer[idx]; } const uint32_t n_tensor_ops = in_width / 16; #pragma unroll for (int l = 0; l < N_ITERS; ++l) { // Load chunk of inputs into shmem. // This is faster than loading it from gmem directly, even though it is only used once. // (Possibly due to latency hiding through staging.) 
const uint32_t n_elems_a = BLOCK_DIM_Z * 16 * in_width; #pragma unroll for (uint32_t idx = thread_elem_idx; idx < n_elems_a; idx += n_elems_per_load) { const uint32_t idx_skewed = idx + idx / in_width * INPUT_SKEW; *(int4*)&act_shmem[idx_skewed] = *(int4*)&input_threadblock[l * n_elems_a + idx]; } __syncthreads(); wmma::fill_fragment(result_frag[l], 0.0f); #pragma unroll for (uint32_t i = 0; i < n_tensor_ops; ++i) { // Load chunk of inputs and weights from shared memory and multiply them wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * threadIdx.z) * (in_width + INPUT_SKEW), in_width + INPUT_SKEW); wmma::load_matrix_sync(weights_frag, weights_shmem + 16 * i + weights_col * (in_width + INPUT_SKEW), in_width + INPUT_SKEW); wmma::mma_sync(result_frag[l], act_frag, weights_frag, result_frag[l]); } __syncthreads(); warp_activation(activation, result_frag[l]); } #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } if (out_intermediate_threadblock_this_layer != nullptr) { __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&out_intermediate_threadblock_this_layer[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T> __device__ void threadblock_last_layer_forward(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out, const uint32_t batch_size, const nvcuda::wmma::layout_t output_layout) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch // weights_this_layer points to the weight matrix of the current layer // out points to the location where the result produced by the thread block should be written to. // Can be nullptr if nothing should be written. constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::col_major> weights_frag[N_BLOCKS]; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") __half* __restrict__ weights_shmem = act_shmem + N_ITERS * BLOCK_DIM_Z * 16 * (WIDTH + SKEW); const uint32_t weights_row = (8 * li) % WIDTH; const uint32_t weights_col = (8 * li + 8 * 32 * wi) / WIDTH; // Load weight matrix into shared memory for the last multiplication. // Loading into shared memory as opposed to directly into registers is faster // because unlike in the previous layers, each warp uses the same entries of the weight matrix. 
if (threadIdx.z == 0) { *(int4*)&weights_shmem[weights_row + weights_col * (WIDTH + SKEW)] = *(int4*)&weights_this_layer[weights_row + weights_col * WIDTH]; } __syncthreads(); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) wmma::load_matrix_sync(weights_frag[i], weights_shmem + 16 * i, WIDTH + SKEW); // Perform last layer by parallelizing over iters for (uint32_t idx = wi; idx < N_ITERS; idx += N_BLOCKS) { wmma::fill_fragment(result_frag, 0.0f); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { // Load a chunk of intermediate activations from shared memory and multiply with chunk of the weight matrix wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * (threadIdx.z + idx * BLOCK_DIM_Z)) * (WIDTH + SKEW), WIDTH + SKEW); wmma::mma_sync(result_frag, act_frag, weights_frag[i], result_frag); } warp_activation(activation, result_frag); if (output_layout == wmma::mem_row_major) { wmma::store_matrix_sync(out + (threadIdx.z + idx * BLOCK_DIM_Z) * 16 * 16, result_frag, 16, output_layout); } else { wmma::store_matrix_sync(out + (threadIdx.z + idx * BLOCK_DIM_Z) * 16, result_frag, batch_size, output_layout); } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS> __device__ void threadblock_write_output_static(const __half* __restrict__ act_shmem, __half* __restrict__ output_threadblock) { // output_threadblock will be filled by the thread block's act_shmem constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&output_threadblock[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T, Activation ACTIVATION, bool INFERENCE> __global__ void kernel_mlp_fused(const Activation output_activation, const __half* __restrict__ input, const __half* __restrict__ weights, OUT_T* __restrict__ out_intermediate, OUT_T* __restrict__ out, const uint32_t batch_size, const uint32_t in_width, const uint32_t out_width, const uint32_t n_hidden_matmuls, const nvcuda::wmma::layout_t output_layout = nvcuda::wmma::mem_row_major) { // `input` points to the input matrix. Can be any width. // `weights` points to the weight matrices (contiguous in memory). // `out_intermediate` points to the memory where intermediate activations should be written. When performing inference, a value of nullptr is expected (intermediate results are not written). // `out` points to the memory where the network output should be written. (Output width is assumed to be 16 neurons.) // Commented out due to isolated strange side-effects on Windows // if (INFERENCE) { // assert(out_intermediate == nullptr); // } else { // assert(out_intermediate); // } // Shared memory contains the intermediate activations of blockDim.y*16 elements. // In some cases, it also contains the weight matrix for the first and last layer. extern __shared__ __half shmem[]; __half* act_shmem = shmem; // Each block computes exactly one 16-element chunk of the batch. 
const uint32_t elem_idx = 16 * blockIdx.x * N_ITERS * BLOCK_DIM_Z; // First layer if (in_width == WIDTH) { // If the input has the same width as the network, we can simply use the network's regular layer routine (with static size) // instead of using the slower dynamic input layer routine. threadblock_load_input_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, input + elem_idx * WIDTH); threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, weights, !INFERENCE ? (out_intermediate + elem_idx * WIDTH) : nullptr); } else { threadblock_input_layer_forward_dynamic<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, input + elem_idx * in_width, weights, !INFERENCE ? (out_intermediate + elem_idx * WIDTH) : nullptr, in_width); } const uint32_t first_layer_size = WIDTH * in_width; const uint32_t layer_stride = WIDTH * WIDTH; const uint32_t output_stride = WIDTH * batch_size; // Hidden layers for (uint32_t k = 0; k < n_hidden_matmuls; ++k) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, weights + first_layer_size + layer_stride * k, !INFERENCE ? (out_intermediate + output_stride * (k + 1) + elem_idx * WIDTH) : nullptr); } if (out_width > 16) { // In the forward pass, intermediate activations are already written out. if constexpr (INFERENCE) { threadblock_write_output_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, out_intermediate + elem_idx * WIDTH); } } else { // Last layer if (output_layout == nvcuda::wmma::mem_row_major) { threadblock_last_layer_forward<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(output_activation, act_shmem, weights + first_layer_size + layer_stride * n_hidden_matmuls, out + elem_idx * 16, 16, output_layout); } else { threadblock_last_layer_forward<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(output_activation, act_shmem, weights + first_layer_size + layer_stride * n_hidden_matmuls, out + elem_idx, batch_size, output_layout); } } } template <int WIDTH, typename T, Activation ACTIVATION, bool INFERENCE> void mlp_fused_forward( hipStream_t stream, Activation output_activation, const GPUMatrix<T, RM>& weights, const GPUMatrix<T>& input, GPUMatrix<T>& output_intermediate, GPUMatrixDynamic<T>& output, const uint32_t n_hidden_layers ) { if constexpr (std::is_same<T, __half>::value) { const uint32_t batch_size = input.cols(); const uint32_t in_width = input.rows(); const uint32_t out_width = output.rows(); constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // <- always going to be 8 as we only support multiple-of-16 widths constexpr uint32_t INPUT_SKEW = 8; // <- likewise with inputs constexpr uint32_t N_BLOCK_ROWS = WIDTH / 16; static_assert(WIDTH % 16 == 0, "Width must be a multiply of 16."); if (in_width % 16 != 0) { throw std::runtime_error{"Inputs must have a multiple-of-16 elements."}; } if (weights.rows() != WIDTH) { throw std::runtime_error{"The fully fused forward pass only works with WIDTH-sized matrices."}; } if (weights.cols() % 16 != 0) { throw std::runtime_error{std::string("weights must have a multiple-of-16 number of columns. ") + std::to_string(weights.cols())}; } if (output_intermediate.cols() != batch_size) { throw std::runtime_error{"Batch size of inputs and output_intermediate doesn't match."}; } if (output.cols() != batch_size) { throw std::runtime_error{"Batch size of inputs and outputs doesn't match."}; } const int N_ITERS = WIDTH >= 256 ? 2 : 8; const uint32_t BLOCK_DIM_Z = (INFERENCE && WIDTH == 128) ? 
2 : 1; if (batch_size % (16 * N_ITERS * BLOCK_DIM_Z) != 0) { throw std::runtime_error{"Batch size must be a multiple of " + std::to_string(16 * N_ITERS * BLOCK_DIM_Z) + "."}; } const dim3 threads = { 32u, N_BLOCK_ROWS, BLOCK_DIM_Z }; // 32 threads = 1 warp, N_BLOCK_ROWS warps per block for 16 rows, up to 2x 8 warps can share input (does not help vs. 1) uint32_t n_elems_per_block = 16 * BLOCK_DIM_Z * N_ITERS; uint32_t n_blocks = div_round_up(batch_size, n_elems_per_block); size_t shmem_size = sizeof(__half) * (16 + 16 * BLOCK_DIM_Z * N_ITERS) * (WIDTH + SKEW); // 16*WIDTH rows of weights (for the last layer; others are in registers only) + 16*WIDTH*BLOCK_DIM_Z*N_ITERS rows of intermediate activations if (in_width != WIDTH) { // If the input width is dynamic, the input weight matrix as well as part of the input will live in extra shared memory shmem_size = ::max(shmem_size, sizeof(__half) * (WIDTH + 16 * BLOCK_DIM_Z) * (in_width + INPUT_SKEW)); } const dim3 blocks = { n_blocks, 1u, 1u }; check_shmem_error(hipFuncSetAttribute(kernel_mlp_fused<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, ACTIVATION, INFERENCE>, hipFuncAttributeMaxDynamicSharedMemorySize, (int)shmem_size)); hipLaunchKernelGGL(( kernel_mlp_fused<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, ACTIVATION, INFERENCE>), dim3(blocks), dim3(threads), shmem_size, stream, output_activation, input.data(), weights.data(), output_intermediate.data(), output.data(), batch_size, in_width, out_width, n_hidden_layers, output.layout() == RM ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major // The kernels operate with transposed layouts compared with the MLP code ); } else { throw std::runtime_error{"The fully fused forward pass only supports __half precision."}; } } template <typename T, int WIDTH> FullyFusedMLP<T, WIDTH>::FullyFusedMLP( uint32_t input_width, uint32_t output_width, uint32_t n_hidden_layers, bool use_feedback_alignment, Activation activation, Activation output_activation ) : m_input_width{input_width}, m_network_width{WIDTH}, m_output_width{output_width}, m_n_hidden_layers{n_hidden_layers}, m_use_feedback_alignment{use_feedback_alignment}, m_activation{activation}, m_output_activation{output_activation} { if (m_n_hidden_layers <= 0) { throw std::runtime_error("FullyFusedMLP requires at least 1 hidden layer (3 layers in total)."); } m_n_hidden_matmuls = n_hidden_layers-1; m_padded_output_width = next_multiple(m_output_width, tensorcore_width); // Create matrices related to weights m_weight_matrices.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_backward.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_input_width); m_gradient_matrices.emplace_back(nullptr, m_network_width, m_input_width); for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) { m_weight_matrices.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_backward.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_network_width); m_gradient_matrices.emplace_back(nullptr, m_network_width, m_network_width); } m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width); m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_network_width); 
m_weight_matrices_backward.emplace_back(nullptr, m_padded_output_width, m_network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_network_width); m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width); // Determine total number of memory entries and set it m_total_n_params = 0; for (const auto& m : m_weight_matrices) { m_total_n_params += m.n_elements(); } // Buffers to keep data from the forward and backward pass m_forward_tmp.resize(m_n_hidden_layers); m_backward_tmp.resize(m_n_hidden_layers); // 1 stream per matmul m_training_splitk_streams.resize(m_n_hidden_layers + 1); m_training_splitk_events.resize(m_n_hidden_layers + 1); for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { CUDA_CHECK_THROW(hipStreamCreate(&m_training_splitk_streams[i])); CUDA_CHECK_THROW(hipEventCreate(&m_training_splitk_events[i])); } } template <typename T, int WIDTH> FullyFusedMLP<T, WIDTH>::~FullyFusedMLP() { for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { free_workspace(m_training_splitk_streams[i]); CUDA_CHECK_PRINT(hipEventDestroy(m_training_splitk_events[i])); CUDA_CHECK_PRINT(hipStreamDestroy(m_training_splitk_streams[i])); } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::inference(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrix<float>& output) { inference_mixed_precision(stream, input, m_inference_output_tmp); const uint32_t n_elements = (uint32_t)output.n_elements(); hipLaunchKernelGGL(( trim_and_cast<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_padded_output_width, m_output_width, m_inference_output_tmp.data(), output.data()); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::inference_mixed_precision(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (&output != &m_inference_output_tmp && output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width: ") + std::to_string(output.m()) + "!=" + std::to_string(m_output_width)); } if (&output != &m_inference_output_tmp && input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_inference_tmp.n() != batch_size) { allocate_inference_buffers(batch_size); } const WeightUsage weight_usage = use_inference_matrices ? 
WeightUsage::Inference : WeightUsage::Forward; // ASSUMPTION: weight matrices are contiguous in memory switch (m_activation) { case Activation::None: mlp_fused_forward<WIDTH, T, Activation::None, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_forward<WIDTH, T, Activation::Exponential, true>(stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_forward<WIDTH, T, Activation::Sigmoid, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_forward<WIDTH, T, Activation::ReLU, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_forward<WIDTH, T, Activation::Squareplus, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_forward<WIDTH, T, Activation::Softplus, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } // If we have more than 16 output dimensions, these will be taken care of by CUTLASS rather than // the fully fused kernel (which will have written out the second-to-last layer activations). if (m_output_width > 16) { if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_inference_tmp, tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_inference_tmp, tmp, (T)m_output_activation_param); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::forward(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices, bool prepare_input_gradients) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width (must be padded): ") + std::to_string(output.m()) + "!=" + std::to_string(m_padded_output_width)); } if (input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_forward_tmp.front().n() != batch_size) { allocate_forward_buffers(batch_size); } const WeightUsage weight_usage = use_inference_matrices ? 
WeightUsage::Inference : WeightUsage::Forward; // ASSUMPTION: weight matrices & forward_tmp matrices are contiguous in memory switch (m_activation) { case Activation::None: mlp_fused_forward<WIDTH, T, Activation::None, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_forward<WIDTH, T, Activation::Exponential, false>(stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_forward<WIDTH, T, Activation::Sigmoid, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_forward<WIDTH, T, Activation::ReLU, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_forward<WIDTH, T, Activation::Squareplus, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_forward<WIDTH, T, Activation::Softplus, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } // If we have more than 16 output dimensions, these will be taken care of by CUTLASS rather than // the fully fused kernel (which will have written out the second-to-last layer activations). if (m_output_width > 16) { if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::backward( hipStream_t stream, const GPUMatrix<T>& input, const GPUMatrixDynamic<T>& output, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>* dL_dinput, bool use_inference_matrices, bool compute_param_gradients ) { if (dL_doutput.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output gradients have incorrect width (must be padded): ") + std::to_string(dL_doutput.m()) + "!=" + std::to_string(m_padded_output_width)); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = dL_doutput.n(); if (m_backward_tmp.front().n() != batch_size) { allocate_backward_buffers(batch_size); } // Compute transfer of output activation in-place... 
it's treated specially for performance reasons const uint32_t n_elements = (uint32_t)dL_doutput.n_elements(); switch (m_output_activation) { case Activation::None: break; case Activation::Exponential:hipLaunchKernelGGL(( exp_transfer_output<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::Sigmoid:hipLaunchKernelGGL(( logistic_transfer_output<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::ReLU:hipLaunchKernelGGL(( relu_transfer_output<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; default: throw std::runtime_error{"Unsupported output activation."}; } // Backprop // - weight_gradient.T = activation * output_gradient.T // - input_gradient = weights.T * output_gradient // - RELU: pre_activation_gradinet = post_activation_gradient if val > 0 else 0 const WeightUsage weight_usage = use_inference_matrices ? WeightUsage::Inference : WeightUsage::Backward; Activation transfer_activation = m_activation == Activation::None ? Activation::None : Activation::ReLUTransfer; { // T normalization = (T)(1.0f / batch_size); T normalization = (T)(1.0f); int split_k_factor = batch_size / ::min((uint32_t)(1 << 12), batch_size); m_backward_output_tmp.set_layout(dL_doutput.layout()); const GPUMatrixDynamic<T>& tmp_dL_doutput = m_output_activation == Activation::None ? dL_doutput : m_backward_output_tmp; uint32_t tmp_idx = m_n_hidden_matmuls; uint32_t backward_tmp_idx = 0; if (compute_param_gradients) { // Output layer hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); hipStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); // Compute weight gradients if (output.layout() == CM) { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.at(backward_tmp_idx), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, normalization); } else { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.at(backward_tmp_idx), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, normalization); } hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } // If the output width is larger than 16 dims, we use cutlass to backpropagate through the last layer // rather than fusing it with our kernel. 
if (m_output_width > 16) { switch (transfer_activation) { case Activation::None: if (output.layout() == CM) { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } else { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } break; case Activation::ReLUTransfer: if (output.layout() == CM) { fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } else { fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } break; default: throw std::runtime_error{"Unsupported activation transfer."}; }; } // ASSUMPTION: weight matrices & forward_tmp matrices are contiguous in memory auto dL_dinput_fused = input.m() == m_forward_tmp.at(0).m() ? dL_dinput : nullptr; // Only let the fully fused kernel compute gradients w.r.t. the input, if the input layer has the same size as the other layers switch (m_activation) { case Activation::None: mlp_fused_backward<WIDTH, T, Activation::None>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_backward<WIDTH, T, Activation::Exponential>(stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_backward<WIDTH, T, Activation::Sigmoid>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_backward<WIDTH, T, Activation::ReLU>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_backward<WIDTH, T, Activation::Squareplus>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_backward<WIDTH, T, Activation::Softplus>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } tmp_idx -= 1; ++backward_tmp_idx; // layers for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) { uint32_t matrix_idx = m_n_hidden_matmuls - i - 1; if (compute_param_gradients) { hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); hipStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); fc_multiply_split_k<Activation::None, 
FullLayerK>(m_training_splitk_streams.at(backward_tmp_idx), m_backward_tmp.at(backward_tmp_idx-1), m_forward_tmp.at(tmp_idx).transposed(), gradient_matrix_at(matrix_idx), split_k_factor, normalization); hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } tmp_idx -= 1; ++backward_tmp_idx; } if (compute_param_gradients) { hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); hipStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.at(backward_tmp_idx), m_backward_tmp.at(backward_tmp_idx-1), input.transposed(), input_gradient_matrix(), split_k_factor, normalization); hipEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } // If requested and if the fully fused kernel didn't already take care of it, compute sensitivity of loss w.r.t. inputs if (dL_dinput && input.m() != m_forward_tmp.at(0).m()) { // TODO: optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same. fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(weight_usage).transposed(), m_backward_tmp.at(backward_tmp_idx-1), *dL_dinput); } } if (compute_param_gradients) { // All the per-layer split-k matrix multiplications summing over // the batch are computed in parallel streams to the actual // backpropagation. Here, we need to wait for all of these to complete. for (auto& event : m_training_splitk_events) { hipStreamWaitEvent(stream, event, 0); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_inference_buffers(uint32_t batch_size) { m_inference_tmp.set_size(m_network_width, batch_size); m_inference_output_tmp.set_size(m_padded_output_width, batch_size); GPUMatrixBase::allocate_shared_memory( m_inference_buffer, { &m_inference_tmp, &m_inference_output_tmp, } ); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_forward_buffers(uint32_t batch_size) { for (size_t i = 0; i < m_forward_tmp.size(); ++i) { m_forward_tmp[i].set_size(m_network_width, batch_size); } GPUMatrixBase::allocate_shared_memory(m_forward_buffer, m_forward_tmp); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_backward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_backward_output_tmp}; m_backward_output_tmp.set_size(m_padded_output_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_backward_tmp.size(); ++i) { m_backward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_backward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_backward_buffer, matrix_pointers); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::initialize_params(std::mt19937& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) { size_t current_pos = 0; for (size_t i = 0; i < m_weight_matrices.size(); ++i) { m_weight_matrices[i].set_data(params + current_pos); m_weight_matrices_inference[i].set_data(inference_params + current_pos); m_weight_matrices_backward[i].set_data((m_use_feedback_alignment ? 
backward_params : params) + current_pos); m_weight_matrices_full_precision[i].set_data(params_full_precision + current_pos); m_gradient_matrices[i].set_data(gradients + current_pos); current_pos += m_weight_matrices[i].n_elements(); } for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) { if (m_activation == Activation::Sine) { if (i == 0) { m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale); } else { m_weight_matrices_full_precision[i].initialize_siren_uniform(rnd, scale); } } else if (m_use_feedback_alignment) { m_weight_matrices_full_precision[i].initialize_fa_uniform_forward(rnd, scale); } else { m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale); } } // Initialize backward params for feedback alignment if (m_use_feedback_alignment) { for (size_t i = 0; i < m_weight_matrices_backward.size(); ++i) { m_weight_matrices_backward[i].initialize_fa_uniform_backward(rnd, scale); } } } template class FullyFusedMLP<network_precision_t, 256>; template class FullyFusedMLP<network_precision_t, 128>; template class FullyFusedMLP<network_precision_t, 64>; template class FullyFusedMLP<network_precision_t, 32>; TCNN_NAMESPACE_END
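For reference, the fused kernels above address every layer's weight matrix inside one flat, contiguous parameter buffer (see `first_layer_size` and `layer_stride` in `kernel_mlp_fused`, and the running `current_pos` in `initialize_params`). Below is a minimal host-side sketch of those element offsets; the function name is illustrative and not part of the file.

#include <cstddef>
#include <cstdint>
#include <vector>

// Element offsets of each layer's weight matrix inside the flat parameter buffer.
// The output layer (padded_output_width x width) starts right after the last hidden matmul.
std::vector<std::size_t> fused_mlp_weight_offsets(uint32_t width, uint32_t input_width, uint32_t n_hidden_matmuls) {
	std::vector<std::size_t> offsets;
	std::size_t pos = 0;
	offsets.push_back(pos);                     // input layer: width x input_width
	pos += (std::size_t)width * input_width;    // == first_layer_size in kernel_mlp_fused
	for (uint32_t k = 0; k < n_hidden_matmuls; ++k) {
		offsets.push_back(pos);                 // hidden matmul k: width x width
		pos += (std::size_t)width * width;      // == layer_stride
	}
	offsets.push_back(pos);                     // output layer: padded_output_width x width
	return offsets;
}

For the hidden layers this reduces to first_layer_size + k * layer_stride, which is exactly the pointer arithmetic `kernel_mlp_fused` performs on its `weights` pointer.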
6ff165b5139a217359e5517c3e39e90ef61c0e95.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *//* */ /** @file fully_fused_mlp.cu * @author Thomas Müller and Nikolaus Binder, NVIDIA * @brief Fully fused CUDA implementation of a multi-layer perceptron. Supports online training * and simultaneous inference. */ #include <tiny-cuda-nn/networks/fully_fused_mlp.h> #include <tiny-cuda-nn/cutlass_matmul.h> #include <tiny-cuda-nn/misc_kernels.h> #include <mma.h> #include <cuda_pipeline.h> TCNN_NAMESPACE_BEGIN static constexpr float K_ACT = 10.0f; void check_shmem_error(cudaError_t error) { if (error != cudaSuccess) { throw std::runtime_error{"FullyFusedMLP: insufficient shared memory available on the GPU. 
Reduce `n_neurons` or use `CutlassMLP` (better compatibility but slower) instead."}; } } template <typename fragment_t> __device__ void warp_activation(Activation activation, fragment_t& frag) { switch (activation) { case Activation::None: return; case Activation::ReLU: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(frag.x[t] > (__half)0.0f); } return; case Activation::Exponential: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__expf(frag.x[t])); } return; case Activation::Sigmoid: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(logistic(frag.x[t])); } return; case Activation::Sine: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__sinf(frag.x[t])); } return; case Activation::Squareplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { float x = (float)frag.x[t] * K_ACT; frag.x[t] = (__half)(0.5f * (x + sqrtf(x * x + 4)) / K_ACT); } return; case Activation::Softplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] = (__half)(__logf(__expf((float)frag.x[t] * K_ACT) + 1.0f) / K_ACT); } return; default: // Unsupported activation // assert(false); // Commented out due to isolated strange side-effects on Windows return; } } template <typename fragment_t, typename forward_fragment_t> __device__ void warp_activation_backward(Activation activation, fragment_t& frag, const forward_fragment_t& forward_frag) { switch (activation) { case Activation::None: return; case Activation::ReLU: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(forward_frag.x[t] > (__half)0.0f); } return; case Activation::Exponential: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= forward_frag.x[t]; } return; case Activation::Sigmoid: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(forward_frag.x[t] * ((__half)1.0f - forward_frag.x[t])); } return; case Activation::Sine: // Sine requires stored pre-activations, which we don't have. We only // write out the post-activations. // assert(false); // Commented out due to isolated strange side-effects on Windows return; case Activation::Squareplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { float y = (float)forward_frag.x[t] * K_ACT; frag.x[t] *= (__half)(y * y / (y * y + 1)); } return; case Activation::Softplus: #pragma unroll for (int t=0; t < frag.num_elements; t++) { frag.x[t] *= (__half)(1.0f - __expf(-(float)forward_frag.x[t] * K_ACT)); } return; default: // Unsupported activation // assert(false); // Commented out due to isolated strange side-effects on Windows return; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T, bool BACKWARD=false> __device__ void threadblock_layer(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out_intermediate_threadblock_this_layer, const OUT_T* __restrict__ activation_aux = nullptr) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch. // Can be forward activations or backward activations, depending on caller. // weights_this_layer points to the weight matrix of the current layer. // out_intermediate_threadblock_this_layer points to the location where intermediate activations produced by the thread block should be written to. // Can be nullptr if nothing should be written. 
// activation_aux points to additional arguments that the activation function may depend on. Points to the hidden forward activations when computing backward activations. constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // If we're performing the backward pass, weights must be loaded in transposed form, which // is achieved by interpreting the memory in row_major instead of col_major order. using weights_layout_t = std::conditional_t<BACKWARD, wmma::row_major, wmma::col_major>; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, weights_layout_t> weights_frag[N_BLOCKS]; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag[N_ITERS]; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; const uint32_t weights_col = 16 * wi; __syncthreads(); // Load N_BLOCKS chunks of weights from global memory into registers. #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { if (BACKWARD) { // If we're performing the backward pass, additional index swizzling is needed to // load the weights in transposed form. wmma::load_matrix_sync(weights_frag[i], weights_this_layer + 16 * i * WIDTH + weights_col, WIDTH); } else { wmma::load_matrix_sync(weights_frag[i], weights_this_layer + 16 * i + weights_col * WIDTH, WIDTH); } } #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::fill_fragment(result_frag[l], 0.0f); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { // Load a chunk of intermediate activations from shared memory and multiply with chunk of weights wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), WIDTH + SKEW); wmma::mma_sync(result_frag[l], act_frag, weights_frag[i], result_frag[l]); } // Activation if (BACKWARD) { // Load the temporary forward matrix for the relu transfer wmma::load_matrix_sync(act_frag, activation_aux + weights_col + (threadIdx.z + l * BLOCK_DIM_Z) * 16 * WIDTH, WIDTH); warp_activation_backward(activation, result_frag[l], act_frag); } else { warp_activation(activation, result_frag[l]); } } __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (threadIdx.z + l * BLOCK_DIM_Z) * 16 * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } if (out_intermediate_threadblock_this_layer != nullptr) { __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { *(int4*)&out_intermediate_threadblock_this_layer[lane_offset + (row + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS> __device__ void threadblock_load_input_static(__half* __restrict__ act_shmem, const __half* __restrict__ input_threadblock) { // act_shmem will be filled by the thread block's chunk of input_threadblock constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 
8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)] = *(int4*)&input_threadblock[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH]; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, Activation ACTIVATION, typename OUTPUT_LAYOUT> __global__ void kernel_mlp_fused_backward(const __half* __restrict__ dL_doutput, const __half* __restrict__ weights, __half* __restrict__ out_intermediate, const __half* __restrict__ forward, __half* __restrict__ dL_dinput, const __half* __restrict__ weights_first_layer, const uint32_t batch_size, const uint32_t out_width, const uint32_t n_hidden_matmuls) { // `dL_doutput` points to the input matrix of the backward pass, i.e. the loss gradients. Assumed to be 16 neurons wide. // `weights` points to the weight matrices (contiguous in memory). // `out_intermediate` points to the memory where backpropagated activation gradients should be written. // `forward` points to the memory where the intermediate activations of the forward pass are located. (needed for activation backprop) constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t bi = blockIdx.x; // block index // Shared memory contains the intermediate activations of blockDim.y*16 elements. // A skew is applied to the matrix storage to avoid bank conflicts. extern __shared__ __half shmem[]; __half* act_shmem = shmem; const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; // Multiplying one 16-row chunk of intermediate activations with the weight matrix requires all warps of the block. // Thus, each block computes exactly one 16-row chunk of the next layer's intermediate activations.
const uint32_t elem_idx_base = 16 * bi * N_ITERS * BLOCK_DIM_Z; const uint32_t elem_idx = elem_idx_base + 16 * threadIdx.z; const uint32_t layer_stride = WIDTH * WIDTH; const uint32_t output_stride = WIDTH * batch_size; // Backprop through last layer if (out_width <= 16) { using namespace nvcuda; using namespace nvcuda::experimental; // Fragments in registers wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, OUTPUT_LAYOUT> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::row_major> weights_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, __half> result_frag[N_ITERS]; // Load the relevant chunk of the last layer's weight matrix from global memory into registers const uint32_t weights_col = 16 * wi; wmma::load_matrix_sync(weights_frag, weights + layer_stride * n_hidden_matmuls + weights_col, WIDTH); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::fill_fragment(result_frag[l], 0.0f); // Load a chunk of output gradients from shared memory and multiply with previously loaded weights if (std::is_same<OUTPUT_LAYOUT, wmma::row_major>::value) { wmma::load_matrix_sync(act_frag, dL_doutput + (elem_idx + 16 * (threadIdx.z + l * BLOCK_DIM_Z)) * 16, 16); } else { wmma::load_matrix_sync(act_frag, dL_doutput + (elem_idx + 16 * (threadIdx.z + l * BLOCK_DIM_Z)), batch_size); } // NOTE: activation transfer of the _output_ activation is expected to be done _prior_ to calling this kernel // in a separate pass, because the transferred activation gradient is also needed to compute the weight // gradient of the last weight matrix (see backward()). wmma::mma_sync(result_frag[l], act_frag, weights_frag, result_frag[l]); // Load the temporary forward matrix for the relu transfer wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> forward_frag; wmma::load_matrix_sync(forward_frag, forward + output_stride * n_hidden_matmuls + weights_col + (elem_idx + l * BLOCK_DIM_Z * 16) * WIDTH, WIDTH); warp_activation_backward(ACTIVATION, result_frag[l], forward_frag); } __syncthreads(); #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&out_intermediate[lane_offset + (row + elem_idx + i * BLOCK_DIM_Z * 16) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } else { // If the output width is larger than 16, we will have used CUTLASS for backpropping through the last layer. // Load the resulting gradients. threadblock_load_input_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, out_intermediate + elem_idx * WIDTH); } // Backprop through hidden layers for (uint32_t k = 0; k < n_hidden_matmuls; ++k) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, true>(ACTIVATION, act_shmem, weights + layer_stride * (n_hidden_matmuls - k - 1), out_intermediate + output_stride * (k + 1) + elem_idx_base * WIDTH, forward + output_stride * (n_hidden_matmuls - k - 1) + elem_idx_base * WIDTH); } // Compute loss gradients w.r.t. input if desired. // THIS CODE ASSUMES THAT THE INPUT WIDTH IS THE SAME AS THE NETWORK WIDTH. // DON'T PASS A NON-NULL dL_dinput IF THIS REQUIREMENT IS NOT MET.
if (dL_dinput != nullptr) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, true>(Activation::None, act_shmem, weights_first_layer, dL_dinput + elem_idx_base * WIDTH); } } template <int WIDTH, typename T, Activation ACTIVATION> void mlp_fused_backward( cudaStream_t stream, const GPUMatrix<T, RM>& weights_first_layer, const GPUMatrix<T, RM>& weights, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>& temporaries, const GPUMatrix<T>& forward, GPUMatrix<T>* dL_dinput, const uint32_t n_hidden_matmuls ) { if constexpr (std::is_same<T, __half>::value) { const uint32_t batch_size = dL_doutput.cols(); const uint32_t out_width = dL_doutput.rows(); constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; if (forward.cols() != batch_size) { throw std::runtime_error{"Batch size of matrices dL_doutput and temporaries doesn't match."}; } const int N_ITERS = WIDTH >= 256 ? 2 : 8; const uint32_t BLOCK_DIM_Z = 1; if (batch_size % (16 * N_ITERS * BLOCK_DIM_Z) != 0) { throw std::runtime_error{"Batch size must be a multiple of " + std::to_string(16 * N_ITERS * BLOCK_DIM_Z) + "."}; } const dim3 threads = { 32u, N_BLOCKS, BLOCK_DIM_Z }; // 32 threads = 1 warp, 8 warps per block for 16 rows, up to 2x 8 warps can share input (does not help vs. 1) uint32_t n_elems_per_block = 16 * BLOCK_DIM_Z * N_ITERS; uint32_t n_blocks = div_round_up(batch_size, n_elems_per_block); int shmem_size = sizeof(__half) * ((16 * BLOCK_DIM_Z * N_ITERS) * (WIDTH + SKEW)); // WIDTH rows of input and 16 * threads.z rows of weights const dim3 blocks = { n_blocks, 1u, 1u }; // The kernels operate with transposed layouts compared with the MLP code if (dL_doutput.layout() == RM) { check_shmem_error(cudaFuncSetAttribute(kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::col_major>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_size)); kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::col_major><<<blocks, threads, shmem_size, stream>>>(dL_doutput.data(), weights.data(), temporaries.data(), forward.data(), dL_dinput ? dL_dinput->data() : nullptr, weights_first_layer.data(), batch_size, out_width, n_hidden_matmuls); } else { check_shmem_error(cudaFuncSetAttribute(kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::row_major>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_size)); kernel_mlp_fused_backward<WIDTH, BLOCK_DIM_Z, N_ITERS, ACTIVATION, nvcuda::wmma::row_major><<<blocks, threads, shmem_size, stream>>>(dL_doutput.data(), weights.data(), temporaries.data(), forward.data(), dL_dinput ? 
dL_dinput->data() : nullptr, weights_first_layer.data(), batch_size, out_width, n_hidden_matmuls); } } else { throw std::runtime_error{"The fully fused backward pass only supports __half precision."}; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T> __device__ void threadblock_input_layer_forward_dynamic(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ input_threadblock, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out_intermediate_threadblock_this_layer, const uint32_t in_width) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch // input_threadblock points to the thread block's chunk of the input batch in global memory // weights_this_layer points to the weight matrix of the current layer // out_intermediate_threadblock_this_layer points to the location where intermediate activations produced by the thread block should be written to. // Can be nullptr if nothing should be written. // in_width is the dynamic width of the input layer constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t INPUT_SKEW = 8; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::col_major> weights_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag[N_ITERS]; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; const uint32_t weights_col = 16 * wi; __half* __restrict__ weights_shmem = act_shmem + BLOCK_DIM_Z * 16 * (in_width + INPUT_SKEW); // Load input weight matrix (fits completely into shared memory) // Each thread can load 8 fp16 elements (16 bytes) at once; we have N_BLOCKS*BLOCK_DIM_Z warps const uint32_t n_elems_per_load = N_BLOCKS * 32 * BLOCK_DIM_Z * 8; const uint32_t thread_elem_idx = (li + wi * 32 + threadIdx.z * N_BLOCKS * 32) * 8; const uint32_t n_elems_b = WIDTH * in_width; #pragma unroll for (uint32_t idx = thread_elem_idx; idx < n_elems_b; idx += n_elems_per_load) { const uint32_t idx_skewed = idx + idx / in_width * INPUT_SKEW; *(int4*)&weights_shmem[idx_skewed] = *(int4*)&weights_this_layer[idx]; } const uint32_t n_tensor_ops = in_width / 16; #pragma unroll for (int l = 0; l < N_ITERS; ++l) { // Load chunk of inputs into shmem. // This is faster than loading it from gmem directly, even though it is only used once. // (Possibly due to latency hiding through staging.) 
const uint32_t n_elems_a = BLOCK_DIM_Z * 16 * in_width; #pragma unroll for (uint32_t idx = thread_elem_idx; idx < n_elems_a; idx += n_elems_per_load) { const uint32_t idx_skewed = idx + idx / in_width * INPUT_SKEW; *(int4*)&act_shmem[idx_skewed] = *(int4*)&input_threadblock[l * n_elems_a + idx]; } __syncthreads(); wmma::fill_fragment(result_frag[l], 0.0f); #pragma unroll for (uint32_t i = 0; i < n_tensor_ops; ++i) { // Load chunk of inputs and weights from shared memory and multiply them wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * threadIdx.z) * (in_width + INPUT_SKEW), in_width + INPUT_SKEW); wmma::load_matrix_sync(weights_frag, weights_shmem + 16 * i + weights_col * (in_width + INPUT_SKEW), in_width + INPUT_SKEW); wmma::mma_sync(result_frag[l], act_frag, weights_frag, result_frag[l]); } __syncthreads(); warp_activation(activation, result_frag[l]); } #pragma unroll for (int l = 0; l < N_ITERS; ++l) { wmma::store_matrix_sync(act_shmem + weights_col + (16 * (threadIdx.z + l * BLOCK_DIM_Z)) * (WIDTH + SKEW), result_frag[l], WIDTH + SKEW, wmma::mem_row_major); } if (out_intermediate_threadblock_this_layer != nullptr) { __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&out_intermediate_threadblock_this_layer[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T> __device__ void threadblock_last_layer_forward(Activation activation, __half* __restrict__ act_shmem, const __half* __restrict__ weights_this_layer, OUT_T* __restrict__ out, const uint32_t batch_size, const nvcuda::wmma::layout_t output_layout) { // act_shmem contains the intermediate activations (shared memory) of the thread block's chunk of the batch // weights_this_layer points to the weight matrix of the current layer // out points to the location where the result produced by the thread block should be written to. // Can be nullptr if nothing should be written. constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; constexpr uint32_t N_BLOCKS = WIDTH / 16; using namespace nvcuda; using namespace nvcuda::experimental; // Fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> act_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::col_major> weights_frag[N_BLOCKS]; wmma::fragment<wmma::accumulator, 16, 16, 16, OUT_T> result_frag; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") __half* __restrict__ weights_shmem = act_shmem + N_ITERS * BLOCK_DIM_Z * 16 * (WIDTH + SKEW); const uint32_t weights_row = (8 * li) % WIDTH; const uint32_t weights_col = (8 * li + 8 * 32 * wi) / WIDTH; // Load weight matrix into shared memory for the last multiplication. // Loading into shared memory as opposed to directly into registers is faster // because unlike in the previous layers, each warp uses the same entries of the weight matrix. 
if (threadIdx.z == 0) { *(int4*)&weights_shmem[weights_row + weights_col * (WIDTH + SKEW)] = *(int4*)&weights_this_layer[weights_row + weights_col * WIDTH]; } __syncthreads(); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) wmma::load_matrix_sync(weights_frag[i], weights_shmem + 16 * i, WIDTH + SKEW); // Perform last layer by parallelizing over iters for (uint32_t idx = wi; idx < N_ITERS; idx += N_BLOCKS) { wmma::fill_fragment(result_frag, 0.0f); #pragma unroll for (uint32_t i = 0; i < N_BLOCKS; ++i) { // Load a chunk of intermediate activations from shared memory and multiply with chunk of the weight matrix wmma::load_matrix_sync(act_frag, act_shmem + 16 * i + (16 * (threadIdx.z + idx * BLOCK_DIM_Z)) * (WIDTH + SKEW), WIDTH + SKEW); wmma::mma_sync(result_frag, act_frag, weights_frag[i], result_frag); } warp_activation(activation, result_frag); if (output_layout == wmma::mem_row_major) { wmma::store_matrix_sync(out + (threadIdx.z + idx * BLOCK_DIM_Z) * 16 * 16, result_frag, 16, output_layout); } else { wmma::store_matrix_sync(out + (threadIdx.z + idx * BLOCK_DIM_Z) * 16, result_frag, batch_size, output_layout); } } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS> __device__ void threadblock_write_output_static(const __half* __restrict__ act_shmem, __half* __restrict__ output_threadblock) { // output_threadblock will be filled by the thread block's act_shmem constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // Indices const uint32_t li = threadIdx.x; // index in warp ("lane index") const uint32_t wi = threadIdx.y; // index in block ("warp index") const uint32_t lane_offset = (8 * li) % WIDTH; const uint32_t row = (8 * li + wi * 8 * 32) / WIDTH; __syncthreads(); #pragma unroll for (int i = 0; i < N_ITERS; ++i) { *(int4*)&output_threadblock[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * WIDTH] = *(int4*)&act_shmem[lane_offset + (row + 16 * (threadIdx.z + i * BLOCK_DIM_Z)) * (WIDTH + SKEW)]; } } template <int WIDTH, int BLOCK_DIM_Z, int N_ITERS, typename OUT_T, Activation ACTIVATION, bool INFERENCE> __global__ void kernel_mlp_fused(const Activation output_activation, const __half* __restrict__ input, const __half* __restrict__ weights, OUT_T* __restrict__ out_intermediate, OUT_T* __restrict__ out, const uint32_t batch_size, const uint32_t in_width, const uint32_t out_width, const uint32_t n_hidden_matmuls, const nvcuda::wmma::layout_t output_layout = nvcuda::wmma::mem_row_major) { // `input` points to the input matrix. Can be any width. // `weights` points to the weight matrices (contiguous in memory). // `out_intermediate` points to the memory where intermediate activations should be written. When performing inference, a value of nullptr is expected (intermediate results are not written). // `out` points to the memory where the network output should be written. (Output width is assumed to be 16 neurons.) // Commented out due to isolated strange side-effects on Windows // if (INFERENCE) { // assert(out_intermediate == nullptr); // } else { // assert(out_intermediate); // } // Shared memory contains the intermediate activations of blockDim.y*16 elements. // In some cases, it also contains the weight matrix for the first and last layer. extern __shared__ __half shmem[]; __half* act_shmem = shmem; // Each block computes exactly one 16-element chunk of the batch. 
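// Illustrative example (not part of the original file), assuming WIDTH < 256 and BLOCK_DIM_Z == 1,
// i.e. N_ITERS == 8 as chosen by mlp_fused_forward below: elem_idx == 16 * blockIdx.x * 8, so each
// block covers 128 consecutive batch elements and gridDim.x == batch_size / 128 (the host code
// enforces that batch_size is a multiple of 16 * N_ITERS * BLOCK_DIM_Z).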
const uint32_t elem_idx = 16 * blockIdx.x * N_ITERS * BLOCK_DIM_Z; // First layer if (in_width == WIDTH) { // If the input has the same width as the network, we can simply use the network's regular layer routine (with static size) // instead of using the slower dynamic input layer routine. threadblock_load_input_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, input + elem_idx * WIDTH); threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, weights, !INFERENCE ? (out_intermediate + elem_idx * WIDTH) : nullptr); } else { threadblock_input_layer_forward_dynamic<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, input + elem_idx * in_width, weights, !INFERENCE ? (out_intermediate + elem_idx * WIDTH) : nullptr, in_width); } const uint32_t first_layer_size = WIDTH * in_width; const uint32_t layer_stride = WIDTH * WIDTH; const uint32_t output_stride = WIDTH * batch_size; // Hidden layers for (uint32_t k = 0; k < n_hidden_matmuls; ++k) { threadblock_layer<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(ACTIVATION, act_shmem, weights + first_layer_size + layer_stride * k, !INFERENCE ? (out_intermediate + output_stride * (k + 1) + elem_idx * WIDTH) : nullptr); } if (out_width > 16) { // In the forward pass, intermediate activations are already written out. if constexpr (INFERENCE) { threadblock_write_output_static<WIDTH, BLOCK_DIM_Z, N_ITERS>(act_shmem, out_intermediate + elem_idx * WIDTH); } } else { // Last layer if (output_layout == nvcuda::wmma::mem_row_major) { threadblock_last_layer_forward<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(output_activation, act_shmem, weights + first_layer_size + layer_stride * n_hidden_matmuls, out + elem_idx * 16, 16, output_layout); } else { threadblock_last_layer_forward<WIDTH, BLOCK_DIM_Z, N_ITERS, OUT_T>(output_activation, act_shmem, weights + first_layer_size + layer_stride * n_hidden_matmuls, out + elem_idx, batch_size, output_layout); } } } template <int WIDTH, typename T, Activation ACTIVATION, bool INFERENCE> void mlp_fused_forward( cudaStream_t stream, Activation output_activation, const GPUMatrix<T, RM>& weights, const GPUMatrix<T>& input, GPUMatrix<T>& output_intermediate, GPUMatrixDynamic<T>& output, const uint32_t n_hidden_layers ) { if constexpr (std::is_same<T, __half>::value) { const uint32_t batch_size = input.cols(); const uint32_t in_width = input.rows(); const uint32_t out_width = output.rows(); constexpr uint32_t SKEW = WIDTH % 16 == 0 ? 8 : 0; // <- always going to be 8 as we only support multiple-of-16 widths constexpr uint32_t INPUT_SKEW = 8; // <- likewise with inputs constexpr uint32_t N_BLOCK_ROWS = WIDTH / 16; static_assert(WIDTH % 16 == 0, "Width must be a multiply of 16."); if (in_width % 16 != 0) { throw std::runtime_error{"Inputs must have a multiple-of-16 elements."}; } if (weights.rows() != WIDTH) { throw std::runtime_error{"The fully fused forward pass only works with WIDTH-sized matrices."}; } if (weights.cols() % 16 != 0) { throw std::runtime_error{std::string("weights must have a multiple-of-16 number of columns. ") + std::to_string(weights.cols())}; } if (output_intermediate.cols() != batch_size) { throw std::runtime_error{"Batch size of inputs and output_intermediate doesn't match."}; } if (output.cols() != batch_size) { throw std::runtime_error{"Batch size of inputs and outputs doesn't match."}; } const int N_ITERS = WIDTH >= 256 ? 2 : 8; const uint32_t BLOCK_DIM_Z = (INFERENCE && WIDTH == 128) ? 
2 : 1; if (batch_size % (16 * N_ITERS * BLOCK_DIM_Z) != 0) { throw std::runtime_error{"Batch size must be a multiple of " + std::to_string(16 * N_ITERS * BLOCK_DIM_Z) + "."}; } const dim3 threads = { 32u, N_BLOCK_ROWS, BLOCK_DIM_Z }; // 32 threads = 1 warp, N_BLOCK_ROWS warps per block for 16 rows, up to 2x 8 warps can share input (does not help vs. 1) uint32_t n_elems_per_block = 16 * BLOCK_DIM_Z * N_ITERS; uint32_t n_blocks = div_round_up(batch_size, n_elems_per_block); size_t shmem_size = sizeof(__half) * (16 + 16 * BLOCK_DIM_Z * N_ITERS) * (WIDTH + SKEW); // 16*WIDTH rows of weights (for the last layer; others are in registers only) + 16*WIDTH*BLOCK_DIM_Z*N_ITERS rows of intermediate activations if (in_width != WIDTH) { // If the input width is dynamic, the input weight matrix as well as part of the input will live in extra shared memory shmem_size = std::max(shmem_size, sizeof(__half) * (WIDTH + 16 * BLOCK_DIM_Z) * (in_width + INPUT_SKEW)); } const dim3 blocks = { n_blocks, 1u, 1u }; check_shmem_error(cudaFuncSetAttribute(kernel_mlp_fused<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, ACTIVATION, INFERENCE>, cudaFuncAttributeMaxDynamicSharedMemorySize, (int)shmem_size)); kernel_mlp_fused<WIDTH, BLOCK_DIM_Z, N_ITERS, __half, ACTIVATION, INFERENCE><<<blocks, threads, shmem_size, stream>>>( output_activation, input.data(), weights.data(), output_intermediate.data(), output.data(), batch_size, in_width, out_width, n_hidden_layers, output.layout() == RM ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major // The kernels operate with transposed layouts compared with the MLP code ); } else { throw std::runtime_error{"The fully fused forward pass only supports __half precision."}; } } template <typename T, int WIDTH> FullyFusedMLP<T, WIDTH>::FullyFusedMLP( uint32_t input_width, uint32_t output_width, uint32_t n_hidden_layers, bool use_feedback_alignment, Activation activation, Activation output_activation ) : m_input_width{input_width}, m_network_width{WIDTH}, m_output_width{output_width}, m_n_hidden_layers{n_hidden_layers}, m_use_feedback_alignment{use_feedback_alignment}, m_activation{activation}, m_output_activation{output_activation} { if (m_n_hidden_layers <= 0) { throw std::runtime_error("FullyFusedMLP requires at least 1 hidden layer (3 layers in total)."); } m_n_hidden_matmuls = n_hidden_layers-1; m_padded_output_width = next_multiple(m_output_width, tensorcore_width); // Create matrices related to weights m_weight_matrices.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_backward.emplace_back(nullptr, m_network_width, m_input_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_input_width); m_gradient_matrices.emplace_back(nullptr, m_network_width, m_input_width); for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) { m_weight_matrices.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_backward.emplace_back(nullptr, m_network_width, m_network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_network_width); m_gradient_matrices.emplace_back(nullptr, m_network_width, m_network_width); } m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width); m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_network_width); m_weight_matrices_backward.emplace_back(nullptr, 
m_padded_output_width, m_network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_network_width); m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width); // Determine total number of memory entries and set it m_total_n_params = 0; for (const auto& m : m_weight_matrices) { m_total_n_params += m.n_elements(); } // Buffers to keep data from the forward and backward pass m_forward_tmp.resize(m_n_hidden_layers); m_backward_tmp.resize(m_n_hidden_layers); // 1 stream per matmul m_training_splitk_streams.resize(m_n_hidden_layers + 1); m_training_splitk_events.resize(m_n_hidden_layers + 1); for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { CUDA_CHECK_THROW(cudaStreamCreate(&m_training_splitk_streams[i])); CUDA_CHECK_THROW(cudaEventCreate(&m_training_splitk_events[i])); } } template <typename T, int WIDTH> FullyFusedMLP<T, WIDTH>::~FullyFusedMLP() { for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { free_workspace(m_training_splitk_streams[i]); CUDA_CHECK_PRINT(cudaEventDestroy(m_training_splitk_events[i])); CUDA_CHECK_PRINT(cudaStreamDestroy(m_training_splitk_streams[i])); } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::inference(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrix<float>& output) { inference_mixed_precision(stream, input, m_inference_output_tmp); const uint32_t n_elements = (uint32_t)output.n_elements(); trim_and_cast<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_padded_output_width, m_output_width, m_inference_output_tmp.data(), output.data()); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::inference_mixed_precision(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (&output != &m_inference_output_tmp && output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width: ") + std::to_string(output.m()) + "!=" + std::to_string(m_output_width)); } if (&output != &m_inference_output_tmp && input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_inference_tmp.n() != batch_size) { allocate_inference_buffers(batch_size); } const WeightUsage weight_usage = use_inference_matrices ? 
WeightUsage::Inference : WeightUsage::Forward; // ASSUMPTION: weight matrices are contiguous in memory switch (m_activation) { case Activation::None: mlp_fused_forward<WIDTH, T, Activation::None, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_forward<WIDTH, T, Activation::Exponential, true>(stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_forward<WIDTH, T, Activation::Sigmoid, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_forward<WIDTH, T, Activation::ReLU, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_forward<WIDTH, T, Activation::Squareplus, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_forward<WIDTH, T, Activation::Softplus, true>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_inference_tmp, output, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } // If we have more than 16 output dimensions, these will be taken care of by CUTLASS rather than // the fully fused kernel (which will have written out the second-to-last layer activations). if (m_output_width > 16) { if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_inference_tmp, tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_inference_tmp, tmp, (T)m_output_activation_param); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::forward(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices, bool prepare_input_gradients) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width (must be padded): ") + std::to_string(output.m()) + "!=" + std::to_string(m_padded_output_width)); } if (input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_forward_tmp.front().n() != batch_size) { allocate_forward_buffers(batch_size); } const WeightUsage weight_usage = use_inference_matrices ? 
WeightUsage::Inference : WeightUsage::Forward; // ASSUMPTION: weight matrices & forward_tmp matrices are contiguous in memory switch (m_activation) { case Activation::None: mlp_fused_forward<WIDTH, T, Activation::None, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_forward<WIDTH, T, Activation::Exponential, false>(stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_forward<WIDTH, T, Activation::Sigmoid, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_forward<WIDTH, T, Activation::ReLU, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_forward<WIDTH, T, Activation::Squareplus, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_forward<WIDTH, T, Activation::Softplus, false>( stream, m_output_activation, input_weight_matrix(weight_usage), input, m_forward_tmp.at(0), output, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } // If we have more than 16 output dimensions, these will be taken care of by CUTLASS rather than // the fully fused kernel (which will have written out the second-to-last layer activations). if (m_output_width > 16) { if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(weight_usage), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::backward( cudaStream_t stream, const GPUMatrix<T>& input, const GPUMatrixDynamic<T>& output, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>* dL_dinput, bool use_inference_matrices, bool compute_param_gradients ) { if (dL_doutput.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output gradients have incorrect width (must be padded): ") + std::to_string(dL_doutput.m()) + "!=" + std::to_string(m_padded_output_width)); } // Make sure our temporary buffers have the correct size for the given batch size uint32_t batch_size = dL_doutput.n(); if (m_backward_tmp.front().n() != batch_size) { allocate_backward_buffers(batch_size); } // Compute transfer of output activation in-place... 
it's treated specially for performance reasons const uint32_t n_elements = (uint32_t)dL_doutput.n_elements(); switch (m_output_activation) { case Activation::None: break; case Activation::Exponential: exp_transfer_output<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::Sigmoid: logistic_transfer_output<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::ReLU: relu_transfer_output<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; default: throw std::runtime_error{"Unsupported output activation."}; } // Backprop // - weight_gradient.T = activation * output_gradient.T // - input_gradient = weights.T * output_gradient // - RELU: pre_activation_gradient = post_activation_gradient if val > 0 else 0 const WeightUsage weight_usage = use_inference_matrices ? WeightUsage::Inference : WeightUsage::Backward; Activation transfer_activation = m_activation == Activation::None ? Activation::None : Activation::ReLUTransfer; { // T normalization = (T)(1.0f / batch_size); T normalization = (T)(1.0f); int split_k_factor = batch_size / std::min((uint32_t)(1 << 12), batch_size); m_backward_output_tmp.set_layout(dL_doutput.layout()); const GPUMatrixDynamic<T>& tmp_dL_doutput = m_output_activation == Activation::None ? dL_doutput : m_backward_output_tmp; uint32_t tmp_idx = m_n_hidden_matmuls; uint32_t backward_tmp_idx = 0; if (compute_param_gradients) { // Output layer cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); cudaStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); // Compute weight gradients if (output.layout() == CM) { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.at(backward_tmp_idx), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, normalization); } else { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.at(backward_tmp_idx), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, normalization); } cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } // If the output width is larger than 16 dims, we use cutlass to backpropagate through the last layer // rather than fusing it with our kernel.
if (m_output_width > 16) { switch (transfer_activation) { case Activation::None: if (output.layout() == CM) { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } else { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } break; case Activation::ReLUTransfer: if (output.layout() == CM) { fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } else { fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, output_weight_matrix(weight_usage).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.at(tmp_idx), m_backward_tmp.at(backward_tmp_idx)); } break; default: throw std::runtime_error{"Unsupported activation transfer."}; }; } // ASSUMPTION: weight matrices & forward_tmp matrices are contiguous in memory auto dL_dinput_fused = input.m() == m_forward_tmp.at(0).m() ? dL_dinput : nullptr; // Only let the fully fused kernel compute gradients w.r.t. the input, if the input layer has the same size as the other layers switch (m_activation) { case Activation::None: mlp_fused_backward<WIDTH, T, Activation::None>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Exponential: mlp_fused_backward<WIDTH, T, Activation::Exponential>(stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Sigmoid: mlp_fused_backward<WIDTH, T, Activation::Sigmoid>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::ReLU: mlp_fused_backward<WIDTH, T, Activation::ReLU>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Squareplus: mlp_fused_backward<WIDTH, T, Activation::Squareplus>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; case Activation::Softplus: mlp_fused_backward<WIDTH, T, Activation::Softplus>( stream, input_weight_matrix(weight_usage), weight_matrix_at(weight_usage, 0), tmp_dL_doutput, m_backward_tmp.at(backward_tmp_idx), m_forward_tmp.at(0), dL_dinput_fused, m_n_hidden_matmuls); break; default: throw std::runtime_error{"Unsupported activation."}; } tmp_idx -= 1; ++backward_tmp_idx; // layers for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) { uint32_t matrix_idx = m_n_hidden_matmuls - i - 1; if (compute_param_gradients) { cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); cudaStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); fc_multiply_split_k<Activation::None, 
FullLayerK>(m_training_splitk_streams.at(backward_tmp_idx), m_backward_tmp.at(backward_tmp_idx-1), m_forward_tmp.at(tmp_idx).transposed(), gradient_matrix_at(matrix_idx), split_k_factor, normalization); cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } tmp_idx -= 1; ++backward_tmp_idx; } if (compute_param_gradients) { cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), stream); cudaStreamWaitEvent(m_training_splitk_streams.at(backward_tmp_idx), m_training_splitk_events.at(backward_tmp_idx), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.at(backward_tmp_idx), m_backward_tmp.at(backward_tmp_idx-1), input.transposed(), input_gradient_matrix(), split_k_factor, normalization); cudaEventRecord(m_training_splitk_events.at(backward_tmp_idx), m_training_splitk_streams.at(backward_tmp_idx)); } // If requested and if the fully fused kernel didn't already take care of it, compute sensitivity of loss w.r.t. inputs if (dL_dinput && input.m() != m_forward_tmp.at(0).m()) { // TODO: optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same. fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(weight_usage).transposed(), m_backward_tmp.at(backward_tmp_idx-1), *dL_dinput); } } if (compute_param_gradients) { // All the per-layer split-k matrix multiplications summing over // the batch are computed in parallel streams to the actual // backpropagation. Here, we need to wait for all of these to complete. for (auto& event : m_training_splitk_events) { cudaStreamWaitEvent(stream, event, 0); } } } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_inference_buffers(uint32_t batch_size) { m_inference_tmp.set_size(m_network_width, batch_size); m_inference_output_tmp.set_size(m_padded_output_width, batch_size); GPUMatrixBase::allocate_shared_memory( m_inference_buffer, { &m_inference_tmp, &m_inference_output_tmp, } ); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_forward_buffers(uint32_t batch_size) { for (size_t i = 0; i < m_forward_tmp.size(); ++i) { m_forward_tmp[i].set_size(m_network_width, batch_size); } GPUMatrixBase::allocate_shared_memory(m_forward_buffer, m_forward_tmp); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::allocate_backward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_backward_output_tmp}; m_backward_output_tmp.set_size(m_padded_output_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_backward_tmp.size(); ++i) { m_backward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_backward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_backward_buffer, matrix_pointers); } template <typename T, int WIDTH> void FullyFusedMLP<T, WIDTH>::initialize_params(std::mt19937& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) { size_t current_pos = 0; for (size_t i = 0; i < m_weight_matrices.size(); ++i) { m_weight_matrices[i].set_data(params + current_pos); m_weight_matrices_inference[i].set_data(inference_params + current_pos); m_weight_matrices_backward[i].set_data((m_use_feedback_alignment ? 
backward_params : params) + current_pos); m_weight_matrices_full_precision[i].set_data(params_full_precision + current_pos); m_gradient_matrices[i].set_data(gradients + current_pos); current_pos += m_weight_matrices[i].n_elements(); } for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) { if (m_activation == Activation::Sine) { if (i == 0) { m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale); } else { m_weight_matrices_full_precision[i].initialize_siren_uniform(rnd, scale); } } else if (m_use_feedback_alignment) { m_weight_matrices_full_precision[i].initialize_fa_uniform_forward(rnd, scale); } else { m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale); } } // Initialize backward params for feedback alignment if (m_use_feedback_alignment) { for (size_t i = 0; i < m_weight_matrices_backward.size(); ++i) { m_weight_matrices_backward[i].initialize_fa_uniform_backward(rnd, scale); } } } template class FullyFusedMLP<network_precision_t, 256>; template class FullyFusedMLP<network_precision_t, 128>; template class FullyFusedMLP<network_precision_t, 64>; template class FullyFusedMLP<network_precision_t, 32>; TCNN_NAMESPACE_END
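The backward pass above overlaps each layer's split-k weight-gradient GEMM with the main backpropagation by launching it on a per-layer side stream and synchronizing through CUDA events. Below is a minimal sketch of that fork/join choreography with a placeholder kernel instead of the library's fc_multiply_split_k; every name in it is illustrative and not part of the code above.

// Minimal sketch of the fork/join pattern used above for the split-k gradient GEMMs:
// record an event on the main stream, let a side stream wait on it, launch the
// gradient work there, then make the main stream wait on the side stream's event.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void partial_gradient(float* grad, const float* act, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) grad[i] += act[i];  // stand-in for one split-k partial product
}

int main() {
    const int n = 1 << 20;
    float *grad, *act;
    cudaMalloc(&grad, n * sizeof(float));
    cudaMalloc(&act, n * sizeof(float));
    cudaMemset(grad, 0, n * sizeof(float));
    cudaMemset(act, 0, n * sizeof(float));

    cudaStream_t main_stream, splitk_stream;
    cudaStreamCreate(&main_stream);
    cudaStreamCreate(&splitk_stream);
    cudaEvent_t ready, done;
    cudaEventCreate(&ready);
    cudaEventCreate(&done);

    // Backpropagation work on the main stream produces the activations/gradients.
    partial_gradient<<<(n + 255) / 256, 256, 0, main_stream>>>(grad, act, n);

    // Fork: the side stream may only start once the main stream has reached this point.
    cudaEventRecord(ready, main_stream);
    cudaStreamWaitEvent(splitk_stream, ready, 0);
    partial_gradient<<<(n + 255) / 256, 256, 0, splitk_stream>>>(grad, act, n);
    cudaEventRecord(done, splitk_stream);

    // Join: the main stream waits for the side stream before the gradients are consumed.
    cudaStreamWaitEvent(main_stream, done, 0);
    cudaStreamSynchronize(main_stream);
    printf("split-k fork/join completed\n");

    cudaFree(grad); cudaFree(act);
    cudaStreamDestroy(main_stream); cudaStreamDestroy(splitk_stream);
    cudaEventDestroy(ready); cudaEventDestroy(done);
    return 0;
}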
e47b52901ddf3b4b82b2b74fdc8fad9c103df5dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the9(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); 
for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * the3(tw*z,r) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex 
bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halvi(hipComplex z) { hipComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); 
hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origa(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(aciwa(q*z),aciwa(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex arth(hipComplex z, hipComplex q, hipComplex ao, hipComplex uo, hipComplex fic) { int v; hipComplex out=z; hipComplex lue(1.0,0.0); for(v=0;v<10;v++) { out = out - lanna(ao/(uo-lue))/hinva(uo/(ao+lue)); lue = lue * q; } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =4.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex tin(1/1024,0.0); hipComplex aon = expc(tin*ai*moux); hipComplex uon= expc(tin*mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = unity; hipComplex vue = cue*ai; hipComplex lue = unity; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex pnx(0.0,0.0); hipComplex pny(0.0,0.0); hipComplex mx = moux; hipComplex my = mouy; hipComplex lx=pnx; hipComplex ly=pny; ; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); //arth(hipComplex z, hipComplex q, hipComplex ao, hipComplex uo, hipComplex fic) // For Questular, Let's see what happens with one of Ramanujan's mock theta functions? 
//cue = thy(q,flat(accume)*fixon); for(v=0;v<6;v++) { mx = mx - (hilva(aon*ly)-uon*mx)/(uon*aciwa(aon*mx)-aon); lx = lx + (hilva(aon*q)-uon*mx); my = my - (halva(aon*mx)-uon*my)/(uon*aciwa(aon*my)-aon); ly = ly + (halva(uon*q)-aon*my); if(norg(cue)>1) { cue = conj(unity/cue); } cue = cue - lx*(rampsi(aon*cue)-uon*ly*cue)/(uon*ramchi(aon*cue)-aon*lx); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
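The qpoch/qp helpers above approximate an infinite product by an 80-term (respectively n-term) truncation. Spelling out what the loop actually evaluates, as a reading of this code rather than a statement about any library:

% The 80-iteration loop in qpoch(a, q) initializes Q = q before the loop, so the
% product starts at k = 1, while the textbook q-Pochhammer symbol starts at k = 0:
\[
  \texttt{qpoch}(a,q) \;=\; \prod_{k=1}^{79}\bigl(1 - a\,q^{k}\bigr),
  \qquad
  (a;q)_{\infty} \;=\; \prod_{k=0}^{\infty}\bigl(1 - a\,q^{k}\bigr),
  \qquad |q| < 1,
\]
% which is also why the routine returns 0 immediately when q.magnitude2() > 1.0,
% where the truncated product would not approximate a convergent quantity.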
e47b52901ddf3b4b82b2b74fdc8fad9c103df5dd.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the9(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * the3(tw*z,r) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex 
tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + 
cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halvi(cuComplex z) { cuComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * 
qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origa(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(aciwa(q*z),aciwa(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex arth(cuComplex z, cuComplex q, cuComplex ao, cuComplex uo, cuComplex fic) { int v; cuComplex out=z; cuComplex lue(1.0,0.0); for(v=0;v<10;v++) { out = out - lanna(ao/(uo-lue))/hinva(uo/(ao+lue)); lue = lue * q; } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =4.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex tin(1/1024,0.0); cuComplex aon = expc(tin*ai*moux); cuComplex uon= expc(tin*mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex 
spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = unity; cuComplex vue = cue*ai; cuComplex lue = unity; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex pnx(0.0,0.0); cuComplex pny(0.0,0.0); cuComplex mx = moux; cuComplex my = mouy; cuComplex lx=pnx; cuComplex ly=pny; ; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); //arth(cuComplex z, cuComplex q, cuComplex ao, cuComplex uo, cuComplex fic) // For Questular, Let's see what happens with one of Ramanujan's mock theta functions? //cue = thy(q,flat(accume)*fixon); for(v=0;v<6;v++) { mx = mx - (hilva(aon*ly)-uon*mx)/(uon*aciwa(aon*mx)-aon); lx = lx + (hilva(aon*q)-uon*mx); my = my - (halva(aon*mx)-uon*my)/(uon*aciwa(aon*my)-aon); ly = ly + (halva(uon*q)-aon*my); if(norg(cue)>1) { cue = conj(unity/cue); } cue = cue - lx*(rampsi(aon*cue)-uon*ly*cue)/(uon*ramchi(aon*cue)-aon*lx); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
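distanceKernel writes d_out[i] with i = c + r*w but keeps its bounds check commented out. If the launcher is called with w = h = DIM = 2100, the 32x32 blocks give a 66x66 grid, i.e. 2112 threads per side, so the trailing threads index past the image unless a guard is restored. Below is a minimal sketch of the guarded 2D-indexing pattern; the kernel name and the placeholder shading are assumptions for illustration only.

// Minimal sketch of guarded 2D pixel indexing with a ceil-divided launch grid.
#include <cuda_runtime.h>

__global__ void shade(uchar4* d_out, int w, int h) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
    const int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (c >= w || r >= h) return;                         // guard: the grid overshoots the image
    const int i = c + r * w;                              // row-major 1D index
    d_out[i] = make_uchar4(c % 256, r % 256, 0, 255);     // placeholder shading
}

void launch_shade(uchar4* d_out, int w, int h) {
    const dim3 block(32, 32);
    const dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    shade<<<grid, block>>>(d_out, w, h);
}

int main() {
    const int w = 2100, h = 2100;
    uchar4* d_out;
    cudaMalloc(&d_out, (size_t)w * h * sizeof(uchar4));
    launch_shade(d_out, w, h);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}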
5cc0c7f435f230ec56283f813bf39cb59131b27e.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/Compute_On_GPU.cuh" #include "../include/Sci_Constant.h" #include "../include/Init_First_Second.cuh" #include "../include/Runge_Kutta.cuh" #include "../include/Laser.cuh" #include <cstdlib> #include <hip/hip_runtime.h> #include <cstdio> __global__ void pairs_init(particle_pair* pair_array, const size_t size, const double min_r, const double min_p) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < size) { distribution(pair_array[idx].first, pair_array[idx].second, idx, min_r, min_p); } return; } __global__ void pairs_first_step_every_step( particle_pair& init_one_pairs,particle_pair* every_step_arr ,size_t size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if( idx <size) { double t = 0; for(int i = 0 ;i<size ;i++) { fill_every_step(init_one_pairs.first, init_one_pairs.second,every_step_arr[i],t); t = t + DX; } } } __global__ void pairs_first_step_on_gpu(particle_pair* first_step_pair_array, const size_t size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx<size) { for (int i = 0; i < one_steps; i++) update_step_one(first_step_pair_array[idx].first, first_step_pair_array[idx].second); } } __global__ void pre_second_step_qq(double * QQ_array) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < 2 * two_steps) { QQ_array[idx] = compute_qq_single(idx); } } __global__ void pre_second_step_E_arr_check (const double* E1_array, const double* E2_array, double* E_check_array) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < 2 * two_steps) { E_check_array[idx] = compute_e_for_check(idx, E1_array[idx], E2_array[idx]); } } __global__ void pre_second_step_e1_arr(const double* QQ_array, const double EE0, double* E1_array) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < 2 * two_steps) { E1_array[idx] = compute_e1_single(idx, QQ_array[idx], EE0); } } __global__ void pre_second_step_e2_arr(const double* QQ_array, const double EE0, double* E2_array) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < 2 * two_steps) { E2_array[idx] = compute_e2_single(idx, QQ_array[idx], EE0); } } __global__ void pairs_second_step_on_gpu_every_step (particle_pair* second_arr, const size_t size, double* E1_array, double* E2_array, particle_pair* every_step_arr) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; double4 e1_laser = make_double4(0.0, 0.0, 0.0, 0.0); double4 e2_laser = make_double4(0.0, 0.0, 0.0, 0.0); int idx_of_laser = -1; // nn //double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4 = 0.0; //double now_t = 0.0; //t(1) if (idx < size) { for (int i = 0; i < two_steps; i++) { if (idx_of_laser == -1) { e1_laser = make_double4(0.0, E1_array[0], E1_array[0], E1_array[1]); e2_laser = make_double4(0.0, E2_array[0], E2_array[0], E2_array[1]); } else { e1_laser = make_double4(E1_array[idx_of_laser], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 2]); e2_laser = make_double4(E2_array[idx_of_laser], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 2]); } idx_of_laser += 2; // update_step_two_every_step(second_arr[0].first, second_arr[0].second, // e1_laser, e2_laser, every_step_arr[i]); } } } __global__ void pairs_second_step_on_gpu (particle_pair* second_arr, const size_t size, double* E1_array, double* E2_array) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; double4 e1_laser = make_double4(0.0, 0.0,0.0,0.0); double4 e2_laser = make_double4(0.0, 0.0,0.0,0.0); int idx_of_laser = -1; // nn //double t1 = 
0.0, t2 = 0.0, t3 = 0.0, t4 = 0.0; //double now_t = 0.0; //t(1) if (idx<size) { for (int i = 0; i < two_steps; i++) { if(idx_of_laser == -1 ) { e1_laser = make_double4(0.0,E1_array[0],E1_array[0],E1_array[1]); e2_laser = make_double4(0.0,E2_array[0],E2_array[0],E2_array[1]); } else { e1_laser = make_double4(E1_array[idx_of_laser],E1_array[idx_of_laser + 1],E1_array[idx_of_laser + 1],E1_array[idx_of_laser +2]); e2_laser = make_double4(E2_array[idx_of_laser],E2_array[idx_of_laser + 1],E2_array[idx_of_laser + 1],E2_array[idx_of_laser +2]); } idx_of_laser += 2; update_step_two(second_arr[idx].first, second_arr[idx].second, e1_laser, e2_laser); /*// t1 = now_t; if (t1 == 0) { e1_laser_t1 = 0.0; e2_laser_t1 = 0.0; } else { idx_of_ds = (2.0 * t1) / DX - 1; e1_laser_t1 = E1_array[idx_of_ds]; e2_laser_t1 = E2_array[idx_of_ds]; } // t2 = now_t + DX / 2.0; idx_of_ds = 2.0 * t2 / DX - 1; e1_laser_t2 = E1_array[idx_of_ds]; e2_laser_t2 = E2_array[idx_of_ds]; // t3 = now_t + DX / 2.0; idx_of_ds = 2 * t3 / DX - 1; e1_laser_t3 = E1_array[idx_of_ds]; e2_laser_t3 = E2_array[idx_of_ds]; // t4 = now_t + DX; idx_of_ds = 2.0 * t4 / DX - 1; e1_laser_t4 = E1_array[idx_of_ds]; e2_laser_t4 = E2_array[idx_of_ds]; double4 e1_laser = make_double4(e1_laser_t1, e1_laser_t2, e1_laser_t3, e1_laser_t4); double4 e2_laser = make_double4(e2_laser_t1, e2_laser_t2, e2_laser_t3, e2_laser_t4); now_t = now_t + DX;*/ /**/ } } } __global__ void pairs_second_step_on_gpu_fliter (const particle_pair* second_step_pair_array, particle_pair* second_step_pair_array_filter, const size_t size, unsigned long long* count_z, unsigned long long* count_zz) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < size) { double ee1 = CalculationE1(second_step_pair_array[idx].first, second_step_pair_array[idx].second); double ee2 = CalculationE2(second_step_pair_array[idx].first, second_step_pair_array[idx].second); if (ee1*ee2 < 0) { atomicAdd(count_z, 1); } if ((ee1 > 0) && (ee2 > 0)) { size_t temp_idx = atomicAdd(count_zz, 1); particle_pair temp; temp.first = second_step_pair_array[idx].first; temp.second = second_step_pair_array[idx].second; second_step_pair_array_filter[temp_idx - 1] = temp; } } }
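pairs_second_step_on_gpu_fliter compacts the pairs that pass the energy test by atomically bumping a global counter to reserve a unique output slot. Below is a minimal sketch of that stream-compaction pattern, assuming the counter is zeroed on the host before launch; it uses the value returned by atomicAdd (the pre-increment count) directly as the index, whereas the kernel above writes to temp_idx - 1 and therefore relies on the host priming count_zz to 1. All names here are illustrative.

// Minimal sketch of atomic-counter stream compaction (order of kept elements is not preserved).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void compact_positive(const double* in, double* out,
                                 unsigned long long* count, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    double v = in[i];
    if (v > 0.0) {                                          // predicate: keep positive entries
        unsigned long long slot = atomicAdd(count, 1ULL);   // old value = my unique slot
        out[slot] = v;
    }
}

int main() {
    const int n = 1 << 20;
    double *in, *out;
    unsigned long long* count;
    cudaMalloc(&in, n * sizeof(double));
    cudaMalloc(&out, n * sizeof(double));
    cudaMalloc(&count, sizeof(unsigned long long));
    cudaMemset(in, 0, n * sizeof(double));                  // all zeros: nothing passes the predicate
    cudaMemset(count, 0, sizeof(unsigned long long));       // counter must start at 0 for this sketch
    compact_positive<<<(n + 255) / 256, 256>>>(in, out, count, n);
    unsigned long long kept = 0;
    cudaMemcpy(&kept, count, sizeof(kept), cudaMemcpyDeviceToHost);
    printf("kept %llu of %d elements\n", kept, n);
    cudaFree(in); cudaFree(out); cudaFree(count);
    return 0;
}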
5cc0c7f435f230ec56283f813bf39cb59131b27e.cu
#include "../include/Compute_On_GPU.cuh"
#include "../include/Sci_Constant.h"
#include "../include/Init_First_Second.cuh"
#include "../include/Runge_Kutta.cuh"
#include "../include/Laser.cuh"

#include <cstdlib>
#include <cuda_runtime.h>
#include <cstdio>

__global__ void pairs_init(particle_pair* pair_array, const size_t size, const double min_r, const double min_p)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
    {
        distribution(pair_array[idx].first, pair_array[idx].second, idx, min_r, min_p);
    }
    return;
}

__global__ void pairs_first_step_every_step(particle_pair& init_one_pairs, particle_pair* every_step_arr, size_t size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
    {
        double t = 0;
        for (int i = 0; i < size; i++)
        {
            fill_every_step(init_one_pairs.first, init_one_pairs.second, every_step_arr[i], t);
            t = t + DX;
        }
    }
}

__global__ void pairs_first_step_on_gpu(particle_pair* first_step_pair_array, const size_t size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
    {
        for (int i = 0; i < one_steps; i++)
            update_step_one(first_step_pair_array[idx].first, first_step_pair_array[idx].second);
    }
}

__global__ void pre_second_step_qq(double* QQ_array)
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < 2 * two_steps)
    {
        QQ_array[idx] = compute_qq_single(idx);
    }
}

__global__ void pre_second_step_E_arr_check(const double* E1_array, const double* E2_array, double* E_check_array)
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < 2 * two_steps)
    {
        E_check_array[idx] = compute_e_for_check(idx, E1_array[idx], E2_array[idx]);
    }
}

__global__ void pre_second_step_e1_arr(const double* QQ_array, const double EE0, double* E1_array)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < 2 * two_steps)
    {
        E1_array[idx] = compute_e1_single(idx, QQ_array[idx], EE0);
    }
}

__global__ void pre_second_step_e2_arr(const double* QQ_array, const double EE0, double* E2_array)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < 2 * two_steps)
    {
        E2_array[idx] = compute_e2_single(idx, QQ_array[idx], EE0);
    }
}

__global__ void pairs_second_step_on_gpu_every_step(particle_pair* second_arr, const size_t size, double* E1_array, double* E2_array, particle_pair* every_step_arr)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    double4 e1_laser = make_double4(0.0, 0.0, 0.0, 0.0);
    double4 e2_laser = make_double4(0.0, 0.0, 0.0, 0.0);
    int idx_of_laser = -1; // corresponds to nn
    //double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4 = 0.0;
    //double now_t = 0.0; // current time, corresponds to t(1)
    if (idx < size)
    {
        for (int i = 0; i < two_steps; i++)
        {
            if (idx_of_laser == -1)
            {
                e1_laser = make_double4(0.0, E1_array[0], E1_array[0], E1_array[1]);
                e2_laser = make_double4(0.0, E2_array[0], E2_array[0], E2_array[1]);
            }
            else
            {
                e1_laser = make_double4(E1_array[idx_of_laser], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 2]);
                e2_laser = make_double4(E2_array[idx_of_laser], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 2]);
            }
            idx_of_laser += 2;
            // update_step_two_every_step(second_arr[0].first, second_arr[0].second,
            //     e1_laser, e2_laser, every_step_arr[i]);
        }
    }
}

__global__ void pairs_second_step_on_gpu(particle_pair* second_arr, const size_t size, double* E1_array, double* E2_array)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    double4 e1_laser = make_double4(0.0, 0.0, 0.0, 0.0);
    double4 e2_laser = make_double4(0.0, 0.0, 0.0, 0.0);
    int idx_of_laser = -1; // corresponds to nn
    //double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4 = 0.0;
    //double now_t = 0.0; // current time, corresponds to t(1)
    if (idx < size)
    {
        for (int i = 0; i < two_steps; i++)
        {
            if (idx_of_laser == -1)
            {
                e1_laser = make_double4(0.0, E1_array[0], E1_array[0], E1_array[1]);
                e2_laser = make_double4(0.0, E2_array[0], E2_array[0], E2_array[1]);
            }
            else
            {
                e1_laser = make_double4(E1_array[idx_of_laser], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 1], E1_array[idx_of_laser + 2]);
                e2_laser = make_double4(E2_array[idx_of_laser], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 1], E2_array[idx_of_laser + 2]);
            }
            idx_of_laser += 2;
            update_step_two(second_arr[idx].first, second_arr[idx].second, e1_laser, e2_laser);

            /*// first laser field strength
            t1 = now_t;
            if (t1 == 0)
            {
                e1_laser_t1 = 0.0;
                e2_laser_t1 = 0.0;
            }
            else
            {
                idx_of_ds = (2.0 * t1) / DX - 1;
                e1_laser_t1 = E1_array[idx_of_ds];
                e2_laser_t1 = E2_array[idx_of_ds];
            }
            // second laser field strength
            t2 = now_t + DX / 2.0;
            idx_of_ds = 2.0 * t2 / DX - 1;
            e1_laser_t2 = E1_array[idx_of_ds];
            e2_laser_t2 = E2_array[idx_of_ds];
            // third laser field strength
            t3 = now_t + DX / 2.0;
            idx_of_ds = 2 * t3 / DX - 1;
            e1_laser_t3 = E1_array[idx_of_ds];
            e2_laser_t3 = E2_array[idx_of_ds];
            // fourth laser field strength
            t4 = now_t + DX;
            idx_of_ds = 2.0 * t4 / DX - 1;
            e1_laser_t4 = E1_array[idx_of_ds];
            e2_laser_t4 = E2_array[idx_of_ds];

            double4 e1_laser = make_double4(e1_laser_t1, e1_laser_t2, e1_laser_t3, e1_laser_t4);
            double4 e2_laser = make_double4(e2_laser_t1, e2_laser_t2, e2_laser_t3, e2_laser_t4);

            now_t = now_t + DX;*/
            /**/
        }
    }
}

__global__ void pairs_second_step_on_gpu_fliter(const particle_pair* second_step_pair_array, particle_pair* second_step_pair_array_filter, const size_t size, unsigned long long* count_z, unsigned long long* count_zz)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
    {
        double ee1 = CalculationE1(second_step_pair_array[idx].first, second_step_pair_array[idx].second);
        double ee2 = CalculationE2(second_step_pair_array[idx].first, second_step_pair_array[idx].second);
        if (ee1 * ee2 < 0)
        {
            atomicAdd(count_z, 1);
        }
        if ((ee1 > 0) && (ee2 > 0))
        {
            size_t temp_idx = atomicAdd(count_zz, 1);
            particle_pair temp;
            temp.first = second_step_pair_array[idx].first;
            temp.second = second_step_pair_array[idx].second;
            second_step_pair_array_filter[temp_idx - 1] = temp;
        }
    }
}
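The idx_of_laser += 2 stride and the double4 packing above encode how the half-step-sampled field tables line up with the four classical RK4 stage times. Written out explicitly, as a reading of the index arithmetic in this file (including the commented-out idx_of_ds = 2*t/DX - 1 form), not as a specification:

% The E1/E2 tables hold 2*two_steps samples at half-step spacing, E_k = E((k+1)\,\Delta t/2),
% and RK4 for step n needs the field at t_n, t_n + \Delta t/2, t_n + \Delta t/2, t_n + \Delta t:
\[
  t_n = n\,\Delta t, \qquad
  \bigl(E(t_n),\, E(t_n + \tfrac{\Delta t}{2}),\, E(t_n + \tfrac{\Delta t}{2}),\, E(t_n + \Delta t)\bigr)
  = \bigl(E_{2n-1},\, E_{2n},\, E_{2n},\, E_{2n+1}\bigr),
\]
% with the first step (n = 0) using the hard-coded E(0) = 0 in place of the out-of-range E_{-1}.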
fd2541cbebe0f1f7ab4f0b80a7fc061091c19015.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <fstream> #include <vector> // DEFINES #define EX "ex10" #define CSV_NAME "ph_data_cuda.csv" #define N_MAX_PRINT 32 #define PRINT_ONLY 10 #define NUM_TESTS 10 // should be uneven so we dont have to copy after each iteration #define GRID_SIZE 512 #define BLOCK_SIZE 512 #define USE_MY_ATOMIC_ADD /** atomicAdd for doubles for hip for nvcc for many cores exercise 10 for me * by: Peter HOLZNER feat. NVIDIA * * - Ref: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions * * 'Don't let your memes be dreams!' * - Probably Ghandi, idk */ __device__ double my_atomic_Add(double* address, double val) { using ulli = unsigned long long int; ulli* address_as_ull = (ulli*)address; ulli old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); }; /** y = A * x */ __global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * x[csr_colindices[k]]; } y[i] = sum; } } /** x <- x + alpha * y */ __global__ void cuda_vecadd(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] += alpha * y[i]; } /** x <- y + alpha * x */ __global__ void cuda_vecadd2(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] = y[i] + alpha * x[i]; } /**result = (x, y) */ __global__ void cuda_dot_product(int N, double *x, double *y, double *result) { __shared__ double shared_mem[BLOCK_SIZE]; double dot = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { dot += x[i] * y[i]; } shared_mem[threadIdx.x] = dot; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k]; } } if (threadIdx.x == 0) { #ifdef USE_MY_ATOMIC_ADD my_atomic_Add(result, shared_mem[0]); #else atomicAdd(result, shared_mem[0]); #endif } } /** Implementation of the conjugate gradient algorithm. * * The control flow is handled by the CPU. * Only the individual operations (vector updates, dot products, sparse * matrix-vector product) are transferred to CUDA kernels. * * The temporary arrays p, r, and Ap need to be allocated on the GPU for use * with CUDA. Modify as you see fit. 
* * Modifications: * returns runtime as double * iteration counter (iters) is passed as reference for logging to csv-file */ double conjugate_gradient(int N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *rhs, double *solution, int& iters) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // initialize timer Timer timer; // clear solution vector (it may contain garbage values): std::fill(solution, solution + N, 0); // initialize work vectors: double alpha, beta; double *cuda_solution, *cuda_p, *cuda_r, *cuda_Ap, *cuda_scalar; hipMalloc(&cuda_p, sizeof(double) * N); hipMalloc(&cuda_r, sizeof(double) * N); hipMalloc(&cuda_Ap, sizeof(double) * N); hipMalloc(&cuda_solution, sizeof(double) * N); hipMalloc(&cuda_scalar, sizeof(double)); hipMemcpy(cuda_p, rhs, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(cuda_r, rhs, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(cuda_solution, solution, sizeof(double) * N, hipMemcpyHostToDevice); const double zero = 0; double residual_norm_squared = 0; hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_r, cuda_scalar); hipMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost); double initial_residual_squared = residual_norm_squared; iters = 0; // it's passed in from the outside hipDeviceSynchronize(); timer.reset(); while (1) { // line 4: A*p: hipLaunchKernelGGL(( cuda_csr_matvec_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap); // lines 5,6: hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_p, cuda_Ap, cuda_scalar); hipMemcpy(&alpha, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost); alpha = residual_norm_squared / alpha; // line 7: hipLaunchKernelGGL(( cuda_vecadd), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_solution, cuda_p, alpha); // line 8: hipLaunchKernelGGL(( cuda_vecadd), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_Ap, -alpha); // line 9: beta = residual_norm_squared; hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_r, cuda_scalar); hipMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost); // line 10: if (std::sqrt(residual_norm_squared / initial_residual_squared) < 1e-6) { break; } // line 11: beta = residual_norm_squared / beta; // line 12: hipLaunchKernelGGL(( cuda_vecadd2), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_p, cuda_r, beta); if (iters > 10000) break; // solver didn't converge ++iters; } hipMemcpy(solution, cuda_solution, sizeof(double) * N, hipMemcpyDeviceToHost); hipDeviceSynchronize(); double runtime = timer.get(); std::cout << "Time elapsed: " << runtime << " (" << runtime / iters << " per iteration)" << std::endl; if (iters > 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations" << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations." 
<< std::endl; hipFree(cuda_p); hipFree(cuda_r); hipFree(cuda_Ap); hipFree(cuda_solution); hipFree(cuda_scalar); return runtime; } /** Solve a system with `points_per_direction * points_per_direction` unknowns */ void solve_system(int points_per_direction) { int N = points_per_direction * points_per_direction; // number of unknows to solve for std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl; // // Allocate CSR arrays. // // Note: Usually one does not know the number of nonzeros in the system matrix // a-priori. // For this exercise, however, we know that there are at most 5 nonzeros // per row in the system matrix, so we can allocate accordingly. // int *csr_rowoffsets = (int *)malloc(sizeof(double) * (N + 1)); int *csr_colindices = (int *)malloc(sizeof(double) * 5 * N); double *csr_values = (double *)malloc(sizeof(double) * 5 * N); int *cuda_csr_rowoffsets, *cuda_csr_colindices; double *cuda_csr_values; // // fill CSR matrix with values // generate_fdm_laplace(points_per_direction, csr_rowoffsets, csr_colindices, csr_values); // // Allocate solution vector and right hand side: // double *solution = (double *)malloc(sizeof(double) * N); double *rhs = (double *)malloc(sizeof(double) * N); std::fill(rhs, rhs + N, 1); // // Allocate CUDA-arrays // // hipMalloc(&cuda_csr_rowoffsets, sizeof(double) * (N + 1)); hipMalloc(&cuda_csr_colindices, sizeof(double) * 5 * N); hipMalloc(&cuda_csr_values, sizeof(double) * 5 * N); hipMemcpy(cuda_csr_rowoffsets, csr_rowoffsets, sizeof(double) * (N + 1), hipMemcpyHostToDevice); hipMemcpy(cuda_csr_colindices, csr_colindices, sizeof(double) * 5 * N, hipMemcpyHostToDevice); hipMemcpy(cuda_csr_values, csr_values, sizeof(double) * 5 * N, hipMemcpyHostToDevice); // // Call Conjugate Gradient implementation with GPU arrays // int iters = 0; // pass into the CG so we can track it double runtime = conjugate_gradient(N, cuda_csr_rowoffsets, cuda_csr_colindices, cuda_csr_values, rhs, solution, iters); // // Check for convergence: // double residual_norm = relative_residual(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution); std::cout << "Relative residual norm: " << residual_norm << " (should be smaller than 1e-6)" << std::endl; // not optimal (efficient), but minimally invasive --> easy to copy std::ofstream csv; csv.open(CSV_NAME, std::fstream::out | std::fstream::app); csv << points_per_direction << ";" << N << ";" << runtime << ";" << residual_norm << ";" << iters << std::endl; csv.close(); hipFree(cuda_csr_rowoffsets); hipFree(cuda_csr_colindices); hipFree(cuda_csr_values); free(solution); free(rhs); free(csr_rowoffsets); free(csr_colindices); free(csr_values); } int main() { std::ofstream csv; csv.open(CSV_NAME, std::fstream::out | std::fstream::trunc); csv << "p;N;runtime;residual;iterations" << std::endl; csv.close(); std::vector<int> p_per_dir{ 10, 100, 500,1000, 1500}; for (auto& p : p_per_dir) { std::cout << "--------------------------" << std::endl; solve_system(p); // solves a system with p*p unknowns } std::cout << "\nData: https://gtx1080.360252.org/2020/" << EX << "/" << CSV_NAME; return EXIT_SUCCESS; }
fd2541cbebe0f1f7ab4f0b80a7fc061091c19015.cu
#include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <fstream> #include <vector> // DEFINES #define EX "ex10" #define CSV_NAME "ph_data_cuda.csv" #define N_MAX_PRINT 32 #define PRINT_ONLY 10 #define NUM_TESTS 10 // should be uneven so we dont have to copy after each iteration #define GRID_SIZE 512 #define BLOCK_SIZE 512 #define USE_MY_ATOMIC_ADD /** atomicAdd for doubles for hip for nvcc for many cores exercise 10 for me * by: Peter HOLZNER feat. NVIDIA * * - Ref: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions * * 'Don't let your memes be dreams!' * - Probably Ghandi, idk */ __device__ double my_atomic_Add(double* address, double val) { using ulli = unsigned long long int; ulli* address_as_ull = (ulli*)address; ulli old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); }; /** y = A * x */ __global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { double sum = 0; for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) { sum += csr_values[k] * x[csr_colindices[k]]; } y[i] = sum; } } /** x <- x + alpha * y */ __global__ void cuda_vecadd(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] += alpha * y[i]; } /** x <- y + alpha * x */ __global__ void cuda_vecadd2(int N, double *x, double *y, double alpha) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) x[i] = y[i] + alpha * x[i]; } /**result = (x, y) */ __global__ void cuda_dot_product(int N, double *x, double *y, double *result) { __shared__ double shared_mem[BLOCK_SIZE]; double dot = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { dot += x[i] * y[i]; } shared_mem[threadIdx.x] = dot; for (int k = blockDim.x / 2; k > 0; k /= 2) { __syncthreads(); if (threadIdx.x < k) { shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k]; } } if (threadIdx.x == 0) { #ifdef USE_MY_ATOMIC_ADD my_atomic_Add(result, shared_mem[0]); #else atomicAdd(result, shared_mem[0]); #endif } } /** Implementation of the conjugate gradient algorithm. * * The control flow is handled by the CPU. * Only the individual operations (vector updates, dot products, sparse * matrix-vector product) are transferred to CUDA kernels. * * The temporary arrays p, r, and Ap need to be allocated on the GPU for use * with CUDA. Modify as you see fit. 
* * Modifications: * returns runtime as double * iteration counter (iters) is passed as reference for logging to csv-file */ double conjugate_gradient(int N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *rhs, double *solution, int& iters) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // initialize timer Timer timer; // clear solution vector (it may contain garbage values): std::fill(solution, solution + N, 0); // initialize work vectors: double alpha, beta; double *cuda_solution, *cuda_p, *cuda_r, *cuda_Ap, *cuda_scalar; cudaMalloc(&cuda_p, sizeof(double) * N); cudaMalloc(&cuda_r, sizeof(double) * N); cudaMalloc(&cuda_Ap, sizeof(double) * N); cudaMalloc(&cuda_solution, sizeof(double) * N); cudaMalloc(&cuda_scalar, sizeof(double)); cudaMemcpy(cuda_p, rhs, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(cuda_r, rhs, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(cuda_solution, solution, sizeof(double) * N, cudaMemcpyHostToDevice); const double zero = 0; double residual_norm_squared = 0; cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice); cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_r, cuda_scalar); cudaMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost); double initial_residual_squared = residual_norm_squared; iters = 0; // it's passed in from the outside cudaDeviceSynchronize(); timer.reset(); while (1) { // line 4: A*p: cuda_csr_matvec_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap); // lines 5,6: cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice); cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_p, cuda_Ap, cuda_scalar); cudaMemcpy(&alpha, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost); alpha = residual_norm_squared / alpha; // line 7: cuda_vecadd<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_solution, cuda_p, alpha); // line 8: cuda_vecadd<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_Ap, -alpha); // line 9: beta = residual_norm_squared; cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice); cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_r, cuda_scalar); cudaMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost); // line 10: if (std::sqrt(residual_norm_squared / initial_residual_squared) < 1e-6) { break; } // line 11: beta = residual_norm_squared / beta; // line 12: cuda_vecadd2<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_p, cuda_r, beta); if (iters > 10000) break; // solver didn't converge ++iters; } cudaMemcpy(solution, cuda_solution, sizeof(double) * N, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); double runtime = timer.get(); std::cout << "Time elapsed: " << runtime << " (" << runtime / iters << " per iteration)" << std::endl; if (iters > 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations" << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations." << std::endl; cudaFree(cuda_p); cudaFree(cuda_r); cudaFree(cuda_Ap); cudaFree(cuda_solution); cudaFree(cuda_scalar); return runtime; } /** Solve a system with `points_per_direction * points_per_direction` unknowns */ void solve_system(int points_per_direction) { int N = points_per_direction * points_per_direction; // number of unknows to solve for std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl; // // Allocate CSR arrays. 
// // Note: Usually one does not know the number of nonzeros in the system matrix // a-priori. // For this exercise, however, we know that there are at most 5 nonzeros // per row in the system matrix, so we can allocate accordingly. // int *csr_rowoffsets = (int *)malloc(sizeof(double) * (N + 1)); int *csr_colindices = (int *)malloc(sizeof(double) * 5 * N); double *csr_values = (double *)malloc(sizeof(double) * 5 * N); int *cuda_csr_rowoffsets, *cuda_csr_colindices; double *cuda_csr_values; // // fill CSR matrix with values // generate_fdm_laplace(points_per_direction, csr_rowoffsets, csr_colindices, csr_values); // // Allocate solution vector and right hand side: // double *solution = (double *)malloc(sizeof(double) * N); double *rhs = (double *)malloc(sizeof(double) * N); std::fill(rhs, rhs + N, 1); // // Allocate CUDA-arrays // // cudaMalloc(&cuda_csr_rowoffsets, sizeof(double) * (N + 1)); cudaMalloc(&cuda_csr_colindices, sizeof(double) * 5 * N); cudaMalloc(&cuda_csr_values, sizeof(double) * 5 * N); cudaMemcpy(cuda_csr_rowoffsets, csr_rowoffsets, sizeof(double) * (N + 1), cudaMemcpyHostToDevice); cudaMemcpy(cuda_csr_colindices, csr_colindices, sizeof(double) * 5 * N, cudaMemcpyHostToDevice); cudaMemcpy(cuda_csr_values, csr_values, sizeof(double) * 5 * N, cudaMemcpyHostToDevice); // // Call Conjugate Gradient implementation with GPU arrays // int iters = 0; // pass into the CG so we can track it double runtime = conjugate_gradient(N, cuda_csr_rowoffsets, cuda_csr_colindices, cuda_csr_values, rhs, solution, iters); // // Check for convergence: // double residual_norm = relative_residual(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution); std::cout << "Relative residual norm: " << residual_norm << " (should be smaller than 1e-6)" << std::endl; // not optimal (efficient), but minimally invasive --> easy to copy std::ofstream csv; csv.open(CSV_NAME, std::fstream::out | std::fstream::app); csv << points_per_direction << ";" << N << ";" << runtime << ";" << residual_norm << ";" << iters << std::endl; csv.close(); cudaFree(cuda_csr_rowoffsets); cudaFree(cuda_csr_colindices); cudaFree(cuda_csr_values); free(solution); free(rhs); free(csr_rowoffsets); free(csr_colindices); free(csr_values); } int main() { std::ofstream csv; csv.open(CSV_NAME, std::fstream::out | std::fstream::trunc); csv << "p;N;runtime;residual;iterations" << std::endl; csv.close(); std::vector<int> p_per_dir{ 10, 100, 500,1000, 1500}; for (auto& p : p_per_dir) { std::cout << "--------------------------" << std::endl; solve_system(p); // solves a system with p*p unknowns } std::cout << "\nData: https://gtx1080.360252.org/2020/" << EX << "/" << CSV_NAME; return EXIT_SUCCESS; }
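The my_atomic_Add helper in the pair above emulates a double-precision atomic add with an atomicCAS retry loop, which is the path the USE_MY_ATOMIC_ADD define selects. Below is a hedged, standalone sketch showing how such a helper can be driven from a reduction kernel; the kernel and buffer names are invented for illustration, and on devices of compute capability 6.0 or newer the built-in atomicAdd(double*, double) could be used instead.

#include <cstdio>
#include <cuda_runtime.h>

__device__ double my_atomic_add(double* address, double val)
{
    unsigned long long int* addr_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *addr_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(addr_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);   // retry until no other thread changed the value
    return __longlong_as_double(old);
}

// Hypothetical driver: accumulates every entry of x into *result.
__global__ void sum_all(const double* x, int n, double* result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) my_atomic_add(result, x[i]);
}

int main()
{
    const int n = 1000;
    double *d_x, *d_sum;
    cudaMalloc(&d_x, n * sizeof(double));
    cudaMalloc(&d_sum, sizeof(double));
    cudaMemset(d_sum, 0, sizeof(double));   // 0.0 has an all-zero bit pattern
    // ... fill d_x before launching ...
    sum_all<<<(n + 255) / 256, 256>>>(d_x, n, d_sum);
    double h_sum = 0.0;
    cudaMemcpy(&h_sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_sum);
    cudaFree(d_x); cudaFree(d_sum);
    return 0;
}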
20cddc77ec543a8a440fbdae51c041535b594d60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"dropOutLayer.h" dropOutLayer::dropOutLayer(string name) { _name = name; _inputName = " "; srcData = NULL; dstData = NULL; nextLayer.clear(); prevLayer.clear(); outputPtr = NULL; configDropOut* curConfig = (configDropOut*) config::instanceObjtce()->getLayersByName(_name); string prevLayerName = curConfig->_input; layersBase* prev_Layer = (layersBase*) Layers::instanceObject()->getLayer(prevLayerName); inputAmount = prev_Layer->channels; inputImageDim = prev_Layer->height; number = prev_Layer->number; channels = prev_Layer->channels; height = prev_Layer->height; width = prev_Layer->width; outputSize = channels * height * width; DropOut_rate = curConfig->dropOut_rate; MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &outputPtr, number * channels * height * width * sizeof(float)); this->createHandles(); } dropOutLayer::dropOutLayer(dropOutLayer* layer) { srcData = NULL; dstData = NULL; nextLayer.clear(); prevLayer.clear(); outputPtr = NULL; static int idx = 0; _name = layer->_name + string("_") + int_to_string(idx); idx ++; _inputName = layer->_inputName; inputAmount = layer->inputAmount; inputImageDim = layer->inputImageDim; number = layer->number; channels = layer->channels; height = layer->height; width = layer->width; outputSize = layer->outputSize; DropOut_rate = layer->DropOut_rate; MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &outputPtr, number * channels * height * width * sizeof(float)); this->createHandles(); } void dropOutLayer::createHandles() { hiprandCreateGenerator(&curandGenerator_DropOut, HIPRAND_RNG_PSEUDO_MTGP32); } __global__ void dropout_train(float* data, float* outputPtr, int size, float probability) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < size) { if(outputPtr[idx] < probability) data[idx] = 0; } } __global__ void dropout_test(float* data, int size, float probability) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < size) { data[idx] = data[idx] * probability; } } void dropOutLayer::CreateUniform(int size) { hiprandSetPseudoRandomGeneratorSeed(curandGenerator_DropOut, time(NULL)); hiprandGenerateUniform(curandGenerator_DropOut, outputPtr, size); } void dropOutLayer::Dropout_TrainSet(float* data, int size, float dropout_rate) { int threadsPerBlock = 256; int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( dropout_train), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, data, outputPtr, size, dropout_rate); hipDeviceSynchronize(); } void dropOutLayer::Dropout_TestSet(float* data, int size, float dropout_rate) { int threadsPerBlock = 256; int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( dropout_test), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, srcData, number * channels * height * width, DropOut_rate); hipDeviceSynchronize(); } void dropOutLayer::forwardPropagation(string train_or_test) { srcData =prevLayer[0]->dstData; dstData = srcData; /*use dropout in training, when testing multiply probability*/ if(train_or_test == "train") { CreateUniform(number * channels * height * width); Dropout_TrainSet(srcData, number * channels * height * width, DropOut_rate); } else Dropout_TestSet(srcData, number * channels * height * width, DropOut_rate); } void dropOutLayer::backwardPropagation(float Momemtum) { int nIndex = m_nCurBranchIndex; diffData = nextLayer[nIndex]->diffData; Dropout_TrainSet(diffData, number * channels * height * width, DropOut_rate); 
} void dropOutLayer::destroyHandles() { hiprandDestroyGenerator(curandGenerator_DropOut); }
20cddc77ec543a8a440fbdae51c041535b594d60.cu
#include"dropOutLayer.h" dropOutLayer::dropOutLayer(string name) { _name = name; _inputName = " "; srcData = NULL; dstData = NULL; nextLayer.clear(); prevLayer.clear(); outputPtr = NULL; configDropOut* curConfig = (configDropOut*) config::instanceObjtce()->getLayersByName(_name); string prevLayerName = curConfig->_input; layersBase* prev_Layer = (layersBase*) Layers::instanceObject()->getLayer(prevLayerName); inputAmount = prev_Layer->channels; inputImageDim = prev_Layer->height; number = prev_Layer->number; channels = prev_Layer->channels; height = prev_Layer->height; width = prev_Layer->width; outputSize = channels * height * width; DropOut_rate = curConfig->dropOut_rate; MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &outputPtr, number * channels * height * width * sizeof(float)); this->createHandles(); } dropOutLayer::dropOutLayer(dropOutLayer* layer) { srcData = NULL; dstData = NULL; nextLayer.clear(); prevLayer.clear(); outputPtr = NULL; static int idx = 0; _name = layer->_name + string("_") + int_to_string(idx); idx ++; _inputName = layer->_inputName; inputAmount = layer->inputAmount; inputImageDim = layer->inputImageDim; number = layer->number; channels = layer->channels; height = layer->height; width = layer->width; outputSize = layer->outputSize; DropOut_rate = layer->DropOut_rate; MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &outputPtr, number * channels * height * width * sizeof(float)); this->createHandles(); } void dropOutLayer::createHandles() { curandCreateGenerator(&curandGenerator_DropOut, CURAND_RNG_PSEUDO_MTGP32); } __global__ void dropout_train(float* data, float* outputPtr, int size, float probability) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < size) { if(outputPtr[idx] < probability) data[idx] = 0; } } __global__ void dropout_test(float* data, int size, float probability) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < size) { data[idx] = data[idx] * probability; } } void dropOutLayer::CreateUniform(int size) { curandSetPseudoRandomGeneratorSeed(curandGenerator_DropOut, time(NULL)); curandGenerateUniform(curandGenerator_DropOut, outputPtr, size); } void dropOutLayer::Dropout_TrainSet(float* data, int size, float dropout_rate) { int threadsPerBlock = 256; int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock; dropout_train<<<blocksPerGrid, threadsPerBlock>>>(data, outputPtr, size, dropout_rate); cudaThreadSynchronize(); } void dropOutLayer::Dropout_TestSet(float* data, int size, float dropout_rate) { int threadsPerBlock = 256; int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock; dropout_test<<<blocksPerGrid, threadsPerBlock>>>(srcData, number * channels * height * width, DropOut_rate); cudaThreadSynchronize(); } void dropOutLayer::forwardPropagation(string train_or_test) { srcData =prevLayer[0]->dstData; dstData = srcData; /*use dropout in training, when testing multiply probability*/ if(train_or_test == "train") { CreateUniform(number * channels * height * width); Dropout_TrainSet(srcData, number * channels * height * width, DropOut_rate); } else Dropout_TestSet(srcData, number * channels * height * width, DropOut_rate); } void dropOutLayer::backwardPropagation(float Momemtum) { int nIndex = m_nCurBranchIndex; diffData = nextLayer[nIndex]->diffData; Dropout_TrainSet(diffData, number * channels * height * width, DropOut_rate); } void dropOutLayer::destroyHandles() { curandDestroyGenerator(curandGenerator_DropOut); }
cb881c6cea0a903f984b222ffd0d5e9b7242fc8e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* SgemmEx: f16.f16->f16 f16.f16->f32 i8.i8->f32 f32.f32->f32 *CgemmEx: ci8.ci8->cf32 [>= sm_50] cf32.cf32->cf32 [>= sm_50] DgemmEx: f64.f64->f64 ZgemmEx: cf64.cf64->cf64 *Cgemm3m: cf32.cf32->cf32 (Gauss) [>= sm_50] Zgemm3m: cf64.cf64->cf64 (Gauss) [>= sm_50] Cherk: cf32->cf32 CherkEx: ci8->cf32 [>= sm_50] cf32->cf32 [>= sm_50] *Cherk3mEx: ci8->cf32 (Gauss) [>= sm_50] cf32->cf32 (Gauss) [>= sm_50] # TODO: Start with: Cgemm (+preconvert to fp32) CgemmEx (8bit, cuda >= 8.0, >=sm_50) Cgemm3m (fp32, cuda >= 8.0, >=sm_50) Cherk (+preconvert to fp32) Cherk3mEx (8bit or fp32, cuda >= 8.0, >=sm_50) The preconvert paths should support ci4, ci8, ci16, fp16 The other paths should only be used if the dtype already matches Eventually it will probably be worth integrating the xGPU kernel, given the lack of cublasHerkEx (particularly the small-N problem). 
*/ #include <bifrost/linalg.h> #include "linalg_kernels.h" #include "assert.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "ShapeIndexer.cuh" #include "trace.hpp" class BFlinalg_impl { hipblasHandle_t _cublas; // No copy-assign BFlinalg_impl(BFlinalg_impl const& ); BFlinalg_impl& operator=(BFlinalg_impl const& ); public: BFlinalg_impl() { BF_CHECK_CUBLAS_EXCEPTION(hipblasCreate(&_cublas)); } ~BFlinalg_impl() { if( _cublas ) { hipblasDestroy(_cublas); } } hipblasHandle_t cublas() const { return _cublas; } }; BFstatus bfMatMul_aa_exec(BFlinalg handle, hipStream_t stream, hipblasOperation_t trans, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, double beta, void const* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(hipblasSetStream(handle->cublas(), stream)); // Note: UPPER here means lower for row-major ordering hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; BF_CHECK_CUBLAS(hipblasSetPointerMode(handle->cublas(), HIPBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(hipblasSsyrk(handle->cublas(), uplo, trans, n, k, &alpha_f, (float*)a_data, a_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(hipblasDsyrk(handle->cublas(), uplo, trans, n, k, &alpha, (double*)a_data, a_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(hipblasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, HIP_C_8I, a_stride, &beta_f, (hipComplex*)c_data, HIP_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(hipblasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, HIP_C_32F, a_stride, &beta_f, (hipComplex*)c_data, HIP_C_32F, c_stride)); break; } #endif BF_CHECK_CUBLAS(hipblasCherk(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, a_stride, &beta_f, (hipComplex*)c_data, c_stride)); break; } case BF_DTYPE_CF64: { BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(hipblasZherk(handle->cublas(), uplo, trans, n, k, &alpha, (hipDoubleComplex*)a_data, a_stride, &beta, (hipDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa(BFlinalg handle, double alpha, BFarray const* a, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; // Convert byte strides to element strides int shape[BF_MAX_DIMS]; int astrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { shape[d] = a->shape[d]; astrides[d] = a->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { 
BF_ASSERT(astrides[d] % BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Check that output shape is correct BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); // Determine transposition based on strides, and update strides and shape hipblasOperation_t trans; if( astrides[ndim-1] < astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-1] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans = (BF_DTYPE_IS_COMPLEX(a->dtype) ? HIPBLAS_OP_C : HIPBLAS_OP_T); } else if( astrides[ndim-1] > astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-2] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans = HIPBLAS_OP_N; std::swap(astrides[ndim-1], astrides[ndim-2]); } else { BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } ShapeIndexer<BF_MAX_DIMS> shape_indexer(shape, ndim-2); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_aa_exec(handle, stream, trans, shape[ndim-2], shape[ndim-1], alpha, a_data, a->dtype, astrides[ndim-2], beta, c_data, c->dtype, cstrides[ndim-2]) ); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab(BFlinalg handle, double alpha, BFarray const* a, BFarray const* b, double beta, BFarray const* c) { // **TODO: Implement this! BF_FAIL("Implemented", BF_STATUS_UNSUPPORTED); } BFstatus bfLinAlgCreate(BFlinalg* handle_ptr) { BF_TRACE(); BF_ASSERT(handle_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*handle_ptr = new BFlinalg_impl(), *handle_ptr = 0); } BFstatus bfLinAlgDestroy(BFlinalg handle) { BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); delete handle; return BF_STATUS_SUCCESS; } // Computes c = a.b or a.a^H if b is NULL BFstatus bfLinAlgMatMul(BFlinalg handle, double alpha, BFarray const* a, // [...,i,j] BFarray const* b, // [...,j,k] double beta, BFarray const* c) { // [...,i,k] // TODO: Use weight_and_sum kernel when: // Dim i is the fastest dim of a // Dim j is the fastest dim of b // Dim k is NOT the fastest dim of c // [Dim k is small (say < 64)] // TODO: Generalise weight_and_sum kernel to arbitrary strides and dtypes // For dtypes, need Complex<T> to work for vectorized loads // UNLESS, we use something like storage_type<T>::type BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); BF_ASSERT(a, BF_STATUS_INVALID_POINTER); BF_ASSERT(c, BF_STATUS_INVALID_POINTER); if( b ) { return bfMatMul_ab(handle, alpha, a, b, beta, c); } else { return bfMatMul_aa(handle, alpha, a, beta, c); } }
cb881c6cea0a903f984b222ffd0d5e9b7242fc8e.cu
/* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* SgemmEx: f16.f16->f16 f16.f16->f32 i8.i8->f32 f32.f32->f32 *CgemmEx: ci8.ci8->cf32 [>= sm_50] cf32.cf32->cf32 [>= sm_50] DgemmEx: f64.f64->f64 ZgemmEx: cf64.cf64->cf64 *Cgemm3m: cf32.cf32->cf32 (Gauss) [>= sm_50] Zgemm3m: cf64.cf64->cf64 (Gauss) [>= sm_50] Cherk: cf32->cf32 CherkEx: ci8->cf32 [>= sm_50] cf32->cf32 [>= sm_50] *Cherk3mEx: ci8->cf32 (Gauss) [>= sm_50] cf32->cf32 (Gauss) [>= sm_50] # TODO: Start with: Cgemm (+preconvert to fp32) CgemmEx (8bit, cuda >= 8.0, >=sm_50) Cgemm3m (fp32, cuda >= 8.0, >=sm_50) Cherk (+preconvert to fp32) Cherk3mEx (8bit or fp32, cuda >= 8.0, >=sm_50) The preconvert paths should support ci4, ci8, ci16, fp16 The other paths should only be used if the dtype already matches Eventually it will probably be worth integrating the xGPU kernel, given the lack of cublasHerkEx (particularly the small-N problem). 
*/ #include <bifrost/linalg.h> #include "linalg_kernels.h" #include "assert.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "ShapeIndexer.cuh" #include "trace.hpp" class BFlinalg_impl { cublasHandle_t _cublas; // No copy-assign BFlinalg_impl(BFlinalg_impl const& ); BFlinalg_impl& operator=(BFlinalg_impl const& ); public: BFlinalg_impl() { BF_CHECK_CUBLAS_EXCEPTION(cublasCreate(&_cublas)); } ~BFlinalg_impl() { if( _cublas ) { cublasDestroy(_cublas); } } cublasHandle_t cublas() const { return _cublas; } }; BFstatus bfMatMul_aa_exec(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, double beta, void const* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(cublasSetStream(handle->cublas(), stream)); // Note: UPPER here means lower for row-major ordering cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; BF_CHECK_CUBLAS(cublasSetPointerMode(handle->cublas(), CUBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(cublasSsyrk(handle->cublas(), uplo, trans, n, k, &alpha_f, (float*)a_data, a_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasDsyrk(handle->cublas(), uplo, trans, n, k, &alpha, (double*)a_data, a_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_8I, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_32F, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } #endif BF_CHECK_CUBLAS(cublasCherk(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, a_stride, &beta_f, (cuComplex*)c_data, c_stride)); break; } case BF_DTYPE_CF64: { BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasZherk(handle->cublas(), uplo, trans, n, k, &alpha, (cuDoubleComplex*)a_data, a_stride, &beta, (cuDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa(BFlinalg handle, double alpha, BFarray const* a, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; // Convert byte strides to element strides int shape[BF_MAX_DIMS]; int astrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { shape[d] = a->shape[d]; astrides[d] = a->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { BF_ASSERT(astrides[d] % 
BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Check that output shape is correct BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); // Determine transposition based on strides, and update strides and shape cublasOperation_t trans; if( astrides[ndim-1] < astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-1] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans = (BF_DTYPE_IS_COMPLEX(a->dtype) ? CUBLAS_OP_C : CUBLAS_OP_T); } else if( astrides[ndim-1] > astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-2] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans = CUBLAS_OP_N; std::swap(astrides[ndim-1], astrides[ndim-2]); } else { BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } ShapeIndexer<BF_MAX_DIMS> shape_indexer(shape, ndim-2); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_aa_exec(handle, stream, trans, shape[ndim-2], shape[ndim-1], alpha, a_data, a->dtype, astrides[ndim-2], beta, c_data, c->dtype, cstrides[ndim-2]) ); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab(BFlinalg handle, double alpha, BFarray const* a, BFarray const* b, double beta, BFarray const* c) { // **TODO: Implement this! BF_FAIL("Implemented", BF_STATUS_UNSUPPORTED); } BFstatus bfLinAlgCreate(BFlinalg* handle_ptr) { BF_TRACE(); BF_ASSERT(handle_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*handle_ptr = new BFlinalg_impl(), *handle_ptr = 0); } BFstatus bfLinAlgDestroy(BFlinalg handle) { BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); delete handle; return BF_STATUS_SUCCESS; } // Computes c = a.b or a.a^H if b is NULL BFstatus bfLinAlgMatMul(BFlinalg handle, double alpha, BFarray const* a, // [...,i,j] BFarray const* b, // [...,j,k] double beta, BFarray const* c) { // [...,i,k] // TODO: Use weight_and_sum kernel when: // Dim i is the fastest dim of a // Dim j is the fastest dim of b // Dim k is NOT the fastest dim of c // [Dim k is small (say < 64)] // TODO: Generalise weight_and_sum kernel to arbitrary strides and dtypes // For dtypes, need Complex<T> to work for vectorized loads // UNLESS, we use something like storage_type<T>::type BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); BF_ASSERT(a, BF_STATUS_INVALID_POINTER); BF_ASSERT(c, BF_STATUS_INVALID_POINTER); if( b ) { return bfMatMul_ab(handle, alpha, a, b, beta, c); } else { return bfMatMul_aa(handle, alpha, a, beta, c); } }
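The rank-k update path above maps a row-major A.A^H product onto cuBLAS Hermitian rank-k routines by choosing the fill mode and transpose flag accordingly. For reference, a minimal column-major cublasCherk call outside of Bifrost's wrappers might look like the sketch below; the dimensions, leading dimensions, and function name are illustrative assumptions, and error handling is reduced to returning the cuBLAS status.

#include <cublas_v2.h>
#include <cuComplex.h>

// C (n x n, Hermitian, lower triangle) <- alpha * A * A^H + beta * C,
// with A an n x k column-major matrix already resident on the device.
cublasStatus_t herk_lower(cublasHandle_t handle,
                          const cuComplex* d_A, cuComplex* d_C,
                          int n, int k)
{
    const float alpha = 1.0f;   // alpha and beta are real scalars for herk
    const float beta  = 0.0f;
    return cublasCherk(handle,
                       CUBLAS_FILL_MODE_LOWER,  // which triangle of C is written
                       CUBLAS_OP_N,             // use A as stored (n x k)
                       n, k,
                       &alpha, d_A, n,          // lda = n for column-major A
                       &beta,  d_C, n);         // ldc = n
}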
intro2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __device__ float fx(float x, float y) { return x + y; } __global__ void kernel(void) { printf("res = %f\n", fx(1.0, 2.0)); } int main(int argc, char* argv[]) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, ); hipDeviceSynchronize(); return 0; }
intro2.cu
#include <stdio.h> #include <cuda_runtime.h> __device__ float fx(float x, float y) { return x + y; } __global__ void kernel(void) { printf("res = %f\n", fx(1.0, 2.0)); } int main(int argc, char* argv[]) { kernel<<<1, 1>>>(); cudaDeviceSynchronize(); return 0; }
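The intro example above launches its kernel without checking for failures. A common pattern, sketched below, is to query cudaGetLastError() right after the launch (for configuration errors) and the status returned by cudaDeviceSynchronize() (for errors raised during execution); the kernel body here is just a stand-in.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void kernel(void)
{
    printf("hello from the device\n");
}

int main(void)
{
    kernel<<<1, 1>>>();
    cudaError_t launch_err = cudaGetLastError();       // launch/configuration errors
    cudaError_t sync_err   = cudaDeviceSynchronize();  // errors during execution
    if (launch_err != cudaSuccess || sync_err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s / %s\n",
                cudaGetErrorString(launch_err), cudaGetErrorString(sync_err));
        return 1;
    }
    return 0;
}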
dyadicAdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void dyadicAdd(int * counter, const int length, const int shift) { if (shift > 0) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int adds = 2*shift; int Index = adds*(xIndex+1)-1; if (Index < length) { counter[Index] = counter[Index] + counter[Index-shift]; } } }
dyadicAdd.cu
#include "includes.h" __global__ void dyadicAdd(int * counter, const int length, const int shift) { if (shift > 0) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int adds = 2*shift; int Index = adds*(xIndex+1)-1; if (Index < length) { counter[Index] = counter[Index] + counter[Index-shift]; } } }
68adee539fc8d722d4856a6b53c2dd08781d783e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "msecost.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *predictions = NULL; hipMalloc(&predictions, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); int size = XSIZE*YSIZE; float *cost = NULL; hipMalloc(&cost, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( msecost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,size,cost); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( msecost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,size,cost); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( msecost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,size,cost); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
68adee539fc8d722d4856a6b53c2dd08781d783e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "msecost.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *predictions = NULL; cudaMalloc(&predictions, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); int size = XSIZE*YSIZE; float *cost = NULL; cudaMalloc(&cost, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); msecost<<<gridBlock,threadBlock>>>(predictions,target,size,cost); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { msecost<<<gridBlock,threadBlock>>>(predictions,target,size,cost); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { msecost<<<gridBlock,threadBlock>>>(predictions,target,size,cost); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
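The harness above measures kernel time with std::chrono around a 1000-launch loop bracketed by cudaDeviceSynchronize(). An alternative is to time on the GPU itself with CUDA events; a minimal sketch follows, where dummy_kernel is only a stand-in for the kernel being benchmarked.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) {
        dummy_kernel<<<1, 256>>>();
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // block until 'stop' has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds
    printf("total: %f ms, per launch: %f ms\n", ms, ms / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}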
a50c5f744d70f95b7d6e59af7aeea03f4a001e36.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime_api.h> #include <sys/time.h> #include <algorithm> #include <fstream> #include <functional> #include <random> #include "HugeCTR/include/data_generator.hpp" #include "HugeCTR/include/data_readers/data_reader.hpp" #include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_one_hot.hpp" #include "gtest/gtest.h" #include "roctracer/roctx.h" #include "utest/embedding/embedding_test_utils.hpp" #include "utest/embedding/sparse_embedding_hash_cpu.hpp" #include "utest/test_utils.h" #include <experimental/filesystem> using namespace HugeCTR; using namespace embedding_test; namespace fs = std::experimental::filesystem; namespace { //--------------------------------------------------------------------------------------- // global params for all testing const int train_batch_num = 10; // can not more than 32 const int test_batch_num = 1; const int train_batchsize = 1024; const int test_batchsize = 2560; const int slot_num = 26; const int max_nnz_per_slot = 1; const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample const long long vocabulary_size = slot_num * 100; const int embedding_vec_size = 128; const int combiner = 0; // 0-sum, 1-mean const long long label_dim = 1; const long long dense_dim = 0; typedef long long T; const float scaler = 1.0f; // used in mixed precision training const float lr = 0.01f; // In order to not allocate the total size of hash table on each GPU, the users need to set the // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, // eg: 1.25x of that. 
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparation const int num_files = 1; const Check_t CHK = Check_t::Sum; // Check_t::Sum const char *train_file_list_name = "train_file_list.txt"; const char *test_file_list_name = "test_file_list.txt"; const char *prefix = "./data_reader_test_data/temp_dataset_"; const char *sparse_model_file = "localized_hash_table"; // std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as // max_vocabulary_size_per_gpu // CAUSION: must match vocabulary_size // std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951, // 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; // // for cretio dataset std::vector<size_t> slot_sizes = {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}; // just for verify //----------------------------------------------------------------------------------------- void init_sparse_model(const char *sparse_model) { std::cout << "Init hash table"; // init hash table file: <key, solt_id, value> if (!fs::exists(sparse_model)) { fs::create_directory(sparse_model); } const std::string key_file = std::string(sparse_model) + "/" + sparse_model + ".key"; const std::string slot_file = std::string(sparse_model) + "/" + sparse_model + ".slot"; const std::string vec_file = std::string(sparse_model) + "/" + sparse_model + ".vec"; std::ofstream fs_key(key_file); std::ofstream fs_slot(slot_file); std::ofstream fs_vec(vec_file); if (!fs_key.is_open() || !fs_slot.is_open() || !fs_vec.is_open()) { ERROR_MESSAGE_("Error: file not open for writing"); } // UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id test::UniformDataSimulator fdata_sim; // for value std::unique_ptr<float[]> buf(new float[embedding_vec_size]); for (long long i = 0; i < vocabulary_size; i++) { T key = (T)i; // T key = ldata_sim.get_num(); // CAUSION: can not set random keys here, because we need to ensure that: // 1) we can find keys in the data file from this hash table // 2) there are no repeated keys fs_key.write((char *)&key, sizeof(T)); T slot_id; if (slot_sizes.size() == 0) { // slot_id = key % slot_num; // CAUSION: need to dedicate the slot_id for each key for // // correctness verification CK_THROW_(Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in " "LocalizedSlotSpasrseEmbeddingOneHot"); } else { size_t offset = 0; for (size_t j = 0; j < slot_sizes.size(); j++) { if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) { slot_id = (T)j; break; } offset += slot_sizes[j]; } } fs_slot.write((char *)&slot_id, sizeof(T)); // float val = (float)i; // float val = 0.1f; fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f); fs_vec.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float)); } std::cout << " Done" << std::endl; } template <typename TypeEmbeddingComp> void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; float tolerance; if (std::is_same<TypeEmbeddingComp, __half>::value) { tolerance = 5e-3f; } else { tolerance = 1e-4f; } test::mpi_init(); int numprocs = 1; #ifdef ENABLE_MPI MPI_Comm_size(MPI_COMM_WORLD, &numprocs); #endif // if there are multi-node, we 
assume each node has the same gpu device_list std::vector<std::vector<int>> vvgpu; for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (resource_manager->is_master_process()) { std::cout << "rank " << resource_manager->get_process_id() << " is generating data" << std::endl; { // re-generate the dataset files std::ifstream file(train_file_list_name); if (file.good()) { std::remove(train_file_list_name); } } { // re-generate the dataset files std::ifstream file(test_file_list_name); if (file.good()) { std::remove(test_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); std::cout << "This is rank: " << resource_manager->get_process_id() << std::endl; #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>( test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); test_data_reader->create_drwg_norm(test_file_list_name, CHK); // generate hashtable if (resource_manager->is_master_process()) { init_sparse_model(sparse_model_file); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif const SparseEmbeddingHashParams embedding_params = { train_batchsize, test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), test_data_reader->get_row_offsets_tensors(), test_data_reader->get_value_tensors(), test_data_reader->get_nnz_array(), embedding_params, resource_manager)); // upload hash table to device embedding->load_parameters(sparse_model_file); // for SparseEmbeddingCpu std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params, train_file_list_name, sparse_model_file, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results(); TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results(); T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr(); float *hash_table_value_from_cpu 
= embedding_cpu->get_hash_table_value_ptr(); // for results check std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create(); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu); Tensor2<TypeEmbeddingComp> wgrad_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval; buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval); buf->allocate(); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; for (int i = 0; i < train_batch_num; i++) { printf("Rank%d: Round %d start training:\n", resource_manager->get_process_id(), i); // call read a batch printf("Rank%d: data_reader->read_a_batch_to_device()\n", resource_manager->get_process_id()); train_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding->forward()\n", resource_manager->get_process_id()); embedding->forward(true); // check the result of forward printf("Rank%d: embedding->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu->forward()\n"); embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu.get_ptr(), embedding_feature_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU backward printf("Rank%d: embedding->backward()\n", resource_manager->get_process_id()); embedding->backward(); // check the result of backward printf("Rank%d: embedding->get_backward_results()\n", resource_manager->get_process_id()); embedding->get_backward_results(wgrad_from_gpu, 0); if (resource_manager->is_master_process()) { // CPU backward printf("Rank0: embedding_cpu->backward()\n"); embedding_cpu->backward(); printf("Rank0: check backward results: GPU and CPU\n"); ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size, wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU update_params printf("Rank%d: embedding->update_params()\n", resource_manager->get_process_id()); embedding->update_params(); if (resource_manager->is_master_process()) { // CPU update_params printf("Rank0: embedding_cpu->update_params()\n"); embedding_cpu->update_params(); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif printf("Rank%d: Round %d end:\n", resource_manager->get_process_id(), i); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // create new obj for eval() embedding->dump_parameters(sparse_model_file); // for SparseEmbeddingCpu eval std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params, test_file_list_name, sparse_model_file, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results(); { 
///////////////////////////////////////////////////////////////////////////////////////////// // eval printf("\nRank%d: Round start eval:\n", resource_manager->get_process_id()); // call read a batch printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", resource_manager->get_process_id()); test_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding_eval->forward()\n", resource_manager->get_process_id()); embedding->forward(false); // check the result of forward printf("Rank%d: embedding_eval->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(false, embedding_feature_from_gpu_eval); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu_eval->forward()\n"); test_embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu_eval.get_ptr(), embedding_feature_from_cpu_eval, tolerance)); } } test::mpi_finalize(); } template <typename TypeEmbeddingComp> void load_and_dump(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; std::vector<std::vector<int>> vvgpu; vvgpu.push_back(device_list); const auto &resource_manager = ResourceManager::create(vvgpu, 0); // re-generate the dataset files { std::ifstream fs(train_file_list_name); if (fs.good()) { std::remove(train_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); // generate hashtable init_sparse_model(sparse_model_file); const SparseEmbeddingHashParams embedding_params = { train_batchsize, test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), embedding_params, resource_manager)); // upload hash table to device embedding->load_parameters(sparse_model_file); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), 
embedding->get_vocabulary_size()); std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> blobs_buff = GeneralBuffer2<CudaHostAllocator>::create(); Tensor2<T> keys; blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &keys); Tensor2<size_t> slot_id; blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &slot_id); Tensor2<float> embeddings; blobs_buff->reserve({embedding->get_max_vocabulary_size(), embedding_vec_size}, &embeddings); blobs_buff->allocate(); BufferBag buf_bag; buf_bag.keys = keys.shrink(); buf_bag.slot_id = slot_id.shrink(); buf_bag.embedding = embeddings; size_t dump_size; embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->reset(); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->load_parameters(buf_bag, dump_size); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); } template <typename TypeEmbeddingComp> void load_and_dump_file(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { std::string sparse_model_src("sparse_model_src"); std::string sparse_model_dst("sparse_model_dst"); float tolerance = 1e-4f; OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; int numprocs = 1, pid = 0; std::vector<std::vector<int>> vvgpu; test::mpi_init(); for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (pid == 0) { // re-generate the dataset files if (fs::exists(train_file_list_name)) { fs::remove(train_file_list_name); } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); const SparseEmbeddingHashParams embedding_params = { train_batchsize, 
test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), embedding_params, resource_manager)); // init hash table file if (pid == 0) { init_sparse_model(sparse_model_src.c_str()); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // upload hash table to device embedding->load_parameters(sparse_model_src); if (pid == 0) { printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); } // dump sparse model to file embedding->dump_parameters(sparse_model_dst); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif auto load_sparse_model_to_map = [](std::vector<T>& key_vec, std::vector<T>& slot_vec, std::vector<float>& vec_vec, const std::string& sparse_model) { const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string slot_file(sparse_model + "/" + sparse_model + ".slot"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); std::ifstream fs_key(key_file, std::ifstream::binary); std::ifstream fs_slot(slot_file, std::ifstream::binary); std::ifstream fs_vec(vec_file, std::ifstream::binary); const size_t key_file_size_in_B = fs::file_size(key_file); const size_t slot_file_size_in_B = fs::file_size(slot_file); const size_t vec_file_size_in_B = fs::file_size(vec_file); const long long num_key = key_file_size_in_B / sizeof(T); const long long num_slot = slot_file_size_in_B / sizeof(T); const long long num_vec = vec_file_size_in_B / (sizeof(float) * embedding_vec_size); if (num_key != num_vec || num_key != num_slot || num_key != vocabulary_size) { CK_THROW_(Error_t::BrokenFile, "num_key != num_vec (num_slot) || num_key != vocabulary_size"); } key_vec.clear(); key_vec.reserve(num_key); slot_vec.clear(); slot_vec.reserve(num_key); vec_vec.clear(); vec_vec.reserve(num_vec * embedding_vec_size); fs_key.read(reinterpret_cast<char *>(key_vec.data()), key_file_size_in_B); fs_slot.read(reinterpret_cast<char *>(slot_vec.data()), slot_file_size_in_B); fs_vec.read(reinterpret_cast<char *>(vec_vec.data()), vec_file_size_in_B); }; std::vector<T> hash_table_key_from_cpu; std::vector<T> slot_id_from_cpu; std::vector<float> hash_table_value_from_cpu; load_sparse_model_to_map(hash_table_key_from_cpu, slot_id_from_cpu, hash_table_value_from_cpu, sparse_model_src); std::vector<T> hash_table_key_from_gpu; std::vector<T> slot_id_from_gpu; std::vector<float> hash_table_value_from_gpu; load_sparse_model_to_map(hash_table_key_from_gpu, slot_id_from_gpu, hash_table_value_from_gpu, sparse_model_dst); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; ASSERT_TRUE(compare_hash_table(vocabulary_size, hash_table_key_from_gpu.data(), reinterpret_cast<TypeHashValue *>(hash_table_value_from_gpu.data()), hash_table_key_from_cpu.data(), reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu.data()), tolerance)); ASSERT_TRUE(compare_key_slot(vocabulary_size, hash_table_key_from_gpu.data(), slot_id_from_gpu.data(), hash_table_key_from_cpu.data(), slot_id_from_cpu.data())); test::mpi_finalize(); } } // namespace TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_1gpu) { 
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_4gpu) {
  train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_1gpu) {
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_4gpu) {
  train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_4gpu) {
  train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_4gpu) {
  train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump) {
  load_and_dump<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump_file_1gpu) {
  load_and_dump_file<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump_file_4gpu) {
  load_and_dump_file<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}
a50c5f744d70f95b7d6e59af7aeea03f4a001e36.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_profiler_api.h> #include <sys/time.h> #include <algorithm> #include <fstream> #include <functional> #include <random> #include "HugeCTR/include/data_generator.hpp" #include "HugeCTR/include/data_readers/data_reader.hpp" #include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_one_hot.hpp" #include "gtest/gtest.h" #include "nvToolsExt.h" #include "utest/embedding/embedding_test_utils.hpp" #include "utest/embedding/sparse_embedding_hash_cpu.hpp" #include "utest/test_utils.h" #include <experimental/filesystem> using namespace HugeCTR; using namespace embedding_test; namespace fs = std::experimental::filesystem; namespace { //--------------------------------------------------------------------------------------- // global params for all testing const int train_batch_num = 10; // can not more than 32 const int test_batch_num = 1; const int train_batchsize = 1024; const int test_batchsize = 2560; const int slot_num = 26; const int max_nnz_per_slot = 1; const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample const long long vocabulary_size = slot_num * 100; const int embedding_vec_size = 128; const int combiner = 0; // 0-sum, 1-mean const long long label_dim = 1; const long long dense_dim = 0; typedef long long T; const float scaler = 1.0f; // used in mixed precision training const float lr = 0.01f; // In order to not allocate the total size of hash table on each GPU, the users need to set the // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, // eg: 1.25x of that. 
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparation const int num_files = 1; const Check_t CHK = Check_t::Sum; // Check_t::Sum const char *train_file_list_name = "train_file_list.txt"; const char *test_file_list_name = "test_file_list.txt"; const char *prefix = "./data_reader_test_data/temp_dataset_"; const char *sparse_model_file = "localized_hash_table"; // std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as // max_vocabulary_size_per_gpu // CAUSION: must match vocabulary_size // std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951, // 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; // // for cretio dataset std::vector<size_t> slot_sizes = {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}; // just for verify //----------------------------------------------------------------------------------------- void init_sparse_model(const char *sparse_model) { std::cout << "Init hash table"; // init hash table file: <key, solt_id, value> if (!fs::exists(sparse_model)) { fs::create_directory(sparse_model); } const std::string key_file = std::string(sparse_model) + "/" + sparse_model + ".key"; const std::string slot_file = std::string(sparse_model) + "/" + sparse_model + ".slot"; const std::string vec_file = std::string(sparse_model) + "/" + sparse_model + ".vec"; std::ofstream fs_key(key_file); std::ofstream fs_slot(slot_file); std::ofstream fs_vec(vec_file); if (!fs_key.is_open() || !fs_slot.is_open() || !fs_vec.is_open()) { ERROR_MESSAGE_("Error: file not open for writing"); } // UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id test::UniformDataSimulator fdata_sim; // for value std::unique_ptr<float[]> buf(new float[embedding_vec_size]); for (long long i = 0; i < vocabulary_size; i++) { T key = (T)i; // T key = ldata_sim.get_num(); // CAUSION: can not set random keys here, because we need to ensure that: // 1) we can find keys in the data file from this hash table // 2) there are no repeated keys fs_key.write((char *)&key, sizeof(T)); T slot_id; if (slot_sizes.size() == 0) { // slot_id = key % slot_num; // CAUSION: need to dedicate the slot_id for each key for // // correctness verification CK_THROW_(Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in " "LocalizedSlotSpasrseEmbeddingOneHot"); } else { size_t offset = 0; for (size_t j = 0; j < slot_sizes.size(); j++) { if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) { slot_id = (T)j; break; } offset += slot_sizes[j]; } } fs_slot.write((char *)&slot_id, sizeof(T)); // float val = (float)i; // float val = 0.1f; fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f); fs_vec.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float)); } std::cout << " Done" << std::endl; } template <typename TypeEmbeddingComp> void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; float tolerance; if (std::is_same<TypeEmbeddingComp, __half>::value) { tolerance = 5e-3f; } else { tolerance = 1e-4f; } test::mpi_init(); int numprocs = 1; #ifdef ENABLE_MPI MPI_Comm_size(MPI_COMM_WORLD, &numprocs); #endif // if there are multi-node, we 
assume each node has the same gpu device_list std::vector<std::vector<int>> vvgpu; for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (resource_manager->is_master_process()) { std::cout << "rank " << resource_manager->get_process_id() << " is generating data" << std::endl; { // re-generate the dataset files std::ifstream file(train_file_list_name); if (file.good()) { std::remove(train_file_list_name); } } { // re-generate the dataset files std::ifstream file(test_file_list_name); if (file.good()) { std::remove(test_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); std::cout << "This is rank: " << resource_manager->get_process_id() << std::endl; #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>( test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); test_data_reader->create_drwg_norm(test_file_list_name, CHK); // generate hashtable if (resource_manager->is_master_process()) { init_sparse_model(sparse_model_file); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif const SparseEmbeddingHashParams embedding_params = { train_batchsize, test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), test_data_reader->get_row_offsets_tensors(), test_data_reader->get_value_tensors(), test_data_reader->get_nnz_array(), embedding_params, resource_manager)); // upload hash table to device embedding->load_parameters(sparse_model_file); // for SparseEmbeddingCpu std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params, train_file_list_name, sparse_model_file, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results(); TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results(); T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr(); float *hash_table_value_from_cpu 
= embedding_cpu->get_hash_table_value_ptr(); // for results check std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create(); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu); Tensor2<TypeEmbeddingComp> wgrad_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval; buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval); buf->allocate(); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; for (int i = 0; i < train_batch_num; i++) { printf("Rank%d: Round %d start training:\n", resource_manager->get_process_id(), i); // call read a batch printf("Rank%d: data_reader->read_a_batch_to_device()\n", resource_manager->get_process_id()); train_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding->forward()\n", resource_manager->get_process_id()); embedding->forward(true); // check the result of forward printf("Rank%d: embedding->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu->forward()\n"); embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu.get_ptr(), embedding_feature_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU backward printf("Rank%d: embedding->backward()\n", resource_manager->get_process_id()); embedding->backward(); // check the result of backward printf("Rank%d: embedding->get_backward_results()\n", resource_manager->get_process_id()); embedding->get_backward_results(wgrad_from_gpu, 0); if (resource_manager->is_master_process()) { // CPU backward printf("Rank0: embedding_cpu->backward()\n"); embedding_cpu->backward(); printf("Rank0: check backward results: GPU and CPU\n"); ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size, wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU update_params printf("Rank%d: embedding->update_params()\n", resource_manager->get_process_id()); embedding->update_params(); if (resource_manager->is_master_process()) { // CPU update_params printf("Rank0: embedding_cpu->update_params()\n"); embedding_cpu->update_params(); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif printf("Rank%d: Round %d end:\n", resource_manager->get_process_id(), i); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // create new obj for eval() embedding->dump_parameters(sparse_model_file); // for SparseEmbeddingCpu eval std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params, test_file_list_name, sparse_model_file, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results(); { 
///////////////////////////////////////////////////////////////////////////////////////////// // eval printf("\nRank%d: Round start eval:\n", resource_manager->get_process_id()); // call read a batch printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", resource_manager->get_process_id()); test_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding_eval->forward()\n", resource_manager->get_process_id()); embedding->forward(false); // check the result of forward printf("Rank%d: embedding_eval->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(false, embedding_feature_from_gpu_eval); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu_eval->forward()\n"); test_embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu_eval.get_ptr(), embedding_feature_from_cpu_eval, tolerance)); } } test::mpi_finalize(); } template <typename TypeEmbeddingComp> void load_and_dump(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; std::vector<std::vector<int>> vvgpu; vvgpu.push_back(device_list); const auto &resource_manager = ResourceManager::create(vvgpu, 0); // re-generate the dataset files { std::ifstream fs(train_file_list_name); if (fs.good()) { std::remove(train_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); // generate hashtable init_sparse_model(sparse_model_file); const SparseEmbeddingHashParams embedding_params = { train_batchsize, test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), embedding_params, resource_manager)); // upload hash table to device embedding->load_parameters(sparse_model_file); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), 
embedding->get_vocabulary_size()); std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> blobs_buff = GeneralBuffer2<CudaHostAllocator>::create(); Tensor2<T> keys; blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &keys); Tensor2<size_t> slot_id; blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &slot_id); Tensor2<float> embeddings; blobs_buff->reserve({embedding->get_max_vocabulary_size(), embedding_vec_size}, &embeddings); blobs_buff->allocate(); BufferBag buf_bag; buf_bag.keys = keys.shrink(); buf_bag.slot_id = slot_id.shrink(); buf_bag.embedding = embeddings; size_t dump_size; embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->reset(); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->load_parameters(buf_bag, dump_size); printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); embedding->dump_parameters(buf_bag, &dump_size); printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size, embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); } template <typename TypeEmbeddingComp> void load_and_dump_file(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { std::string sparse_model_src("sparse_model_src"); std::string sparse_model_dst("sparse_model_dst"); float tolerance = 1e-4f; OptHyperParams hyper_params; hyper_params.sgd.atomic_update = true; const OptParams opt_params = {optimizer, lr, hyper_params, update_type, scaler}; int numprocs = 1, pid = 0; std::vector<std::vector<int>> vvgpu; test::mpi_init(); for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (pid == 0) { // re-generate the dataset files if (fs::exists(train_file_list_name)) { fs::remove(train_file_list_name); } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { CK_THROW_( Error_t::WrongInput, "Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot"); } } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); const SparseEmbeddingHashParams embedding_params = { train_batchsize, 
test_batchsize, 0, slot_sizes, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingOneHot<T, TypeEmbeddingComp>( train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(), embedding_params, resource_manager)); // init hash table file if (pid == 0) { init_sparse_model(sparse_model_src.c_str()); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // upload hash table to device embedding->load_parameters(sparse_model_src); if (pid == 0) { printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size()); } // dump sparse model to file embedding->dump_parameters(sparse_model_dst); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif auto load_sparse_model_to_map = [](std::vector<T>& key_vec, std::vector<T>& slot_vec, std::vector<float>& vec_vec, const std::string& sparse_model) { const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string slot_file(sparse_model + "/" + sparse_model + ".slot"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); std::ifstream fs_key(key_file, std::ifstream::binary); std::ifstream fs_slot(slot_file, std::ifstream::binary); std::ifstream fs_vec(vec_file, std::ifstream::binary); const size_t key_file_size_in_B = fs::file_size(key_file); const size_t slot_file_size_in_B = fs::file_size(slot_file); const size_t vec_file_size_in_B = fs::file_size(vec_file); const long long num_key = key_file_size_in_B / sizeof(T); const long long num_slot = slot_file_size_in_B / sizeof(T); const long long num_vec = vec_file_size_in_B / (sizeof(float) * embedding_vec_size); if (num_key != num_vec || num_key != num_slot || num_key != vocabulary_size) { CK_THROW_(Error_t::BrokenFile, "num_key != num_vec (num_slot) || num_key != vocabulary_size"); } key_vec.clear(); key_vec.reserve(num_key); slot_vec.clear(); slot_vec.reserve(num_key); vec_vec.clear(); vec_vec.reserve(num_vec * embedding_vec_size); fs_key.read(reinterpret_cast<char *>(key_vec.data()), key_file_size_in_B); fs_slot.read(reinterpret_cast<char *>(slot_vec.data()), slot_file_size_in_B); fs_vec.read(reinterpret_cast<char *>(vec_vec.data()), vec_file_size_in_B); }; std::vector<T> hash_table_key_from_cpu; std::vector<T> slot_id_from_cpu; std::vector<float> hash_table_value_from_cpu; load_sparse_model_to_map(hash_table_key_from_cpu, slot_id_from_cpu, hash_table_value_from_cpu, sparse_model_src); std::vector<T> hash_table_key_from_gpu; std::vector<T> slot_id_from_gpu; std::vector<float> hash_table_value_from_gpu; load_sparse_model_to_map(hash_table_key_from_gpu, slot_id_from_gpu, hash_table_value_from_gpu, sparse_model_dst); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; ASSERT_TRUE(compare_hash_table(vocabulary_size, hash_table_key_from_gpu.data(), reinterpret_cast<TypeHashValue *>(hash_table_value_from_gpu.data()), hash_table_key_from_cpu.data(), reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu.data()), tolerance)); ASSERT_TRUE(compare_key_slot(vocabulary_size, hash_table_key_from_gpu.data(), slot_id_from_gpu.data(), hash_table_key_from_cpu.data(), slot_id_from_cpu.data())); test::mpi_finalize(); } } // namespace TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_1gpu) { 
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_4gpu) {
  train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_1gpu) {
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_4gpu) {
  train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_4gpu) {
  train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Local);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_4gpu) {
  train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump) {
  load_and_dump<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump_file_1gpu) {
  load_and_dump_file<float>({0}, Optimizer_t::SGD, Update_t::Global);
}

TEST(localized_sparse_embedding_one_hot_test, load_and_dump_file_4gpu) {
  load_and_dump_file<float>({0, 1, 2, 3}, Optimizer_t::SGD, Update_t::Global);
}
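// ---------------------------------------------------------------------------
// Annotation (commentary, not part of the original sources): the .hip/.cu pair
// above is a GTest harness for LocalizedSlotSparseEmbeddingOneHot. Each
// train_and_test run (1) regenerates the synthetic dataset files, (2) builds
// DataReaders for train and eval, (3) loads the hash table via
// load_parameters, then per batch runs forward/backward/update_params on the
// GPU embedding and asserts the results against the SparseEmbeddingHashCpu
// reference within `tolerance`. load_and_dump and load_and_dump_file exercise
// dump_parameters/load_parameters round-trips through a BufferBag and through
// sparse-model files on disk, respectively.
// ---------------------------------------------------------------------------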
4301b3334075886c5360cab8fdfaea8eb7404c08.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <ctime> #define STB_IMAGE_WRITE_IMPLEMENTATION #include <stb/stb_image_write.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <Color.cuh> #include <Ray.cuh> #include <Camera.cuh> #include <Sphere.cuh> #include <HittableList.cuh> #include <Material.cuh> #include <helperUtils.cuh> #include <hiprand/hiprand_kernel.h> using namespace TinyRT; __device__ Color rayColor(const Ray& r, Hittable** hittable, const int maxDepth, hiprandState_t* const randStatePtr) { Ray curRay = r; Vec3 curAttenuation(1.0f, 1.0f, 1.0f); for (size_t i = 0; i < maxDepth; ++i) { HitRecord rec; if ((*hittable)->hit(curRay, 0.0001f, M_FLOAT_INFINITY, rec)) { Ray scattered; Vec3 attenuation; if (rec.matPtr->scatter(curRay, rec, attenuation, scattered, randStatePtr)) { curRay = scattered; curAttenuation *= attenuation; } else { return { 0.0, 0.0, 0.0 }; } } else { const Vec3 unitDirection = unitVec3(curRay.direction()); const double t = 0.5f * (unitDirection.y() + 1.0f); const Color background = (1.0f - t) * Color(1.0f, 1.0f, 1.0f) + t * Color(0.5f, 0.7f, 1.0f); return curAttenuation * background; } } // exceed max depth return { 0.0f, 0.0f, 0.0f }; } __global__ void renderInit(const int imageWidth, const int imageHeight, hiprandState_t* const randStateList) { const int col = threadIdx.x + blockIdx.x * blockDim.x; const int row = threadIdx.y + blockIdx.y * blockDim.y; if ((col >= imageWidth) || (row >= imageHeight)) return; const int idx = row * imageWidth + col; // init random numbers for anti-aliasing // each thread gets its own special seed, fixed sequence number, fixed offset hiprand_init(2020 + idx, 0, 0, &randStateList[idx]); } __global__ void render( Color* const pixelBuffer, const int imageWidth, const int imageHeight, Camera** const camera, hiprandState_t* const randStateList, const int samplesPerPixel, const int maxDepth, Hittable** const hittableWorldObjList) { const int col = threadIdx.x + blockIdx.x * blockDim.x; const int row = threadIdx.y + blockIdx.y * blockDim.y; if (col >= imageWidth || row >= imageHeight) return; const int idx = row * imageWidth + col; hiprandState_t randState = randStateList[idx]; Color pixelColor(0.0f, 0.0f, 0.0f); for (size_t s = 0; s < samplesPerPixel; ++s) { const auto u = (static_cast<float>(col) + randomFloat(&randState)) / static_cast<float>(imageWidth - 1); const auto v = 1.0 - (static_cast<float>(row) + randomFloat(&randState)) / static_cast<float>(imageHeight - 1); const Ray r = (*camera)->getRay(u, v); pixelColor += rayColor(r, hittableWorldObjList, maxDepth, &randState); } pixelColor /= samplesPerPixel; pixelColor.gammaCorrect(); pixelBuffer[idx] = pixelColor; } __global__ void createWorld(Camera** camera, Hittable** hittableList, Hittable** hittableWorldObjList) { if (threadIdx.x == 0 && blockIdx.x == 0) { *camera = new Camera(); hittableList[0] = new Sphere(Point3(0.0f, 0.0f, 1.0f), 0.5f, new Lambertian(new SolidColor(Color(0.7f, 0.3f, 0.3f)))); hittableList[1] = new Sphere(Point3(0.0f, -100.5f, 1.0f), 100.0f, new Lambertian(new SolidColor(Color(0.8f, 0.8f, 0.0f)))); hittableList[2] = new Sphere(Point3(1.0f, 0.0f, 1.0f), 0.5f, new Metal(Color(0.8f, 0.6f, 0.2f), 1.0f)); hittableList[3] = new Sphere(Point3(-1.0f, 0.0f, 1.0f), 0.5f, new Metal(Color(0.8f, 0.8f, 0.8f), 0.3f)); *hittableWorldObjList = new HittableList(hittableList, 4); } } __global__ void freeWorld(Camera** camera, Hittable** hittablePtrList, Hittable** hittableWorldObjList) { 
delete* camera; for (int i = 0; i < 4; ++i) { // delete random texture instances delete hittablePtrList[i]->matPtr()->texturePtr(); // delete random material instances delete hittablePtrList[i]->matPtr(); // delete object instances delete hittablePtrList[i]; } delete* hittableWorldObjList; } int main() { /* image config */ constexpr float aspectRatio = 16.0f / 9.0f; constexpr int imageWidth = 400; constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio); constexpr int samplesPerPixel = 100; constexpr int maxDepth = 50; /* image output file */ const std::string fileName("output.png"); /* thread block config */ constexpr int threadBlockWidth = 8; constexpr int threadBlockHeight = 8; // preparation constexpr int channelNum = 3; // rgb constexpr int pixelNum = imageWidth * imageHeight; constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color); constexpr size_t randStateListBytes = pixelNum * sizeof(hiprandState_t); // allocate memory for pixel buffer const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes); // allocate random state const auto randStateListPtr = cudaUniquePtr<hiprandState_t>(randStateListBytes); // create world of hittable objects and the camera const auto cameraPtr = cudaUniquePtr<Camera*>(sizeof(Camera*)); const auto hittableListPtr = cudaUniquePtr<Hittable*>(4 * sizeof(Hittable*)); const auto hittableWorldObjListPtr = cudaUniquePtr<Hittable*>(sizeof(Hittable*)); hipLaunchKernelGGL(( createWorld), dim3(1), dim3(1), 0, 0, cameraPtr.get(), hittableListPtr.get(), hittableWorldObjListPtr.get()); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // start timer const clock_t start = clock(); const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1); const dim3 threadDim(threadBlockWidth, threadBlockHeight); // render init hipLaunchKernelGGL(( renderInit), dim3(blockDim), dim3(threadDim), 0, 0, imageWidth, imageHeight, randStateListPtr.get()); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // render the image into buffer hipLaunchKernelGGL(( render), dim3(blockDim), dim3(threadDim), 0, 0, pixelBufferPtr.get(), imageWidth, imageHeight, cameraPtr.get(), randStateListPtr.get(), samplesPerPixel, maxDepth, hittableWorldObjListPtr.get() ); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // stop timer const clock_t stop = clock(); // measure rendering time const auto renderingMillisecond = stop - start; // other image writer arguments constexpr int imageSize = pixelNum * channelNum; constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char); const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]); // store the pixel data into writing buffer as 8bit color for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) { const Color color = pixelBufferPtr.get()[pixelIdx]; pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit()); pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit()); pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit()); } // print rendering time std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl; // write pixel data to output file stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes); // free world of hittable objects hipLaunchKernelGGL(( freeWorld), dim3(1), dim3(1), 0, 0, cameraPtr.get(), hittableListPtr.get(), 
                                                hittableWorldObjListPtr.get());
	checkCudaErrors(hipGetLastError());
	checkCudaErrors(hipDeviceSynchronize());

	return 0;
}
4301b3334075886c5360cab8fdfaea8eb7404c08.cu
#include <iostream> #include <fstream> #include <ctime> #define STB_IMAGE_WRITE_IMPLEMENTATION #include <stb/stb_image_write.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <Color.cuh> #include <Ray.cuh> #include <Camera.cuh> #include <Sphere.cuh> #include <HittableList.cuh> #include <Material.cuh> #include <helperUtils.cuh> #include <curand_kernel.h> using namespace TinyRT; __device__ Color rayColor(const Ray& r, Hittable** hittable, const int maxDepth, curandState* const randStatePtr) { Ray curRay = r; Vec3 curAttenuation(1.0f, 1.0f, 1.0f); for (size_t i = 0; i < maxDepth; ++i) { HitRecord rec; if ((*hittable)->hit(curRay, 0.0001f, M_FLOAT_INFINITY, rec)) { Ray scattered; Vec3 attenuation; if (rec.matPtr->scatter(curRay, rec, attenuation, scattered, randStatePtr)) { curRay = scattered; curAttenuation *= attenuation; } else { return { 0.0, 0.0, 0.0 }; } } else { const Vec3 unitDirection = unitVec3(curRay.direction()); const double t = 0.5f * (unitDirection.y() + 1.0f); const Color background = (1.0f - t) * Color(1.0f, 1.0f, 1.0f) + t * Color(0.5f, 0.7f, 1.0f); return curAttenuation * background; } } // exceed max depth return { 0.0f, 0.0f, 0.0f }; } __global__ void renderInit(const int imageWidth, const int imageHeight, curandState* const randStateList) { const int col = threadIdx.x + blockIdx.x * blockDim.x; const int row = threadIdx.y + blockIdx.y * blockDim.y; if ((col >= imageWidth) || (row >= imageHeight)) return; const int idx = row * imageWidth + col; // init random numbers for anti-aliasing // each thread gets its own special seed, fixed sequence number, fixed offset curand_init(2020 + idx, 0, 0, &randStateList[idx]); } __global__ void render( Color* const pixelBuffer, const int imageWidth, const int imageHeight, Camera** const camera, curandState* const randStateList, const int samplesPerPixel, const int maxDepth, Hittable** const hittableWorldObjList) { const int col = threadIdx.x + blockIdx.x * blockDim.x; const int row = threadIdx.y + blockIdx.y * blockDim.y; if (col >= imageWidth || row >= imageHeight) return; const int idx = row * imageWidth + col; curandState randState = randStateList[idx]; Color pixelColor(0.0f, 0.0f, 0.0f); for (size_t s = 0; s < samplesPerPixel; ++s) { const auto u = (static_cast<float>(col) + randomFloat(&randState)) / static_cast<float>(imageWidth - 1); const auto v = 1.0 - (static_cast<float>(row) + randomFloat(&randState)) / static_cast<float>(imageHeight - 1); const Ray r = (*camera)->getRay(u, v); pixelColor += rayColor(r, hittableWorldObjList, maxDepth, &randState); } pixelColor /= samplesPerPixel; pixelColor.gammaCorrect(); pixelBuffer[idx] = pixelColor; } __global__ void createWorld(Camera** camera, Hittable** hittableList, Hittable** hittableWorldObjList) { if (threadIdx.x == 0 && blockIdx.x == 0) { *camera = new Camera(); hittableList[0] = new Sphere(Point3(0.0f, 0.0f, 1.0f), 0.5f, new Lambertian(new SolidColor(Color(0.7f, 0.3f, 0.3f)))); hittableList[1] = new Sphere(Point3(0.0f, -100.5f, 1.0f), 100.0f, new Lambertian(new SolidColor(Color(0.8f, 0.8f, 0.0f)))); hittableList[2] = new Sphere(Point3(1.0f, 0.0f, 1.0f), 0.5f, new Metal(Color(0.8f, 0.6f, 0.2f), 1.0f)); hittableList[3] = new Sphere(Point3(-1.0f, 0.0f, 1.0f), 0.5f, new Metal(Color(0.8f, 0.8f, 0.8f), 0.3f)); *hittableWorldObjList = new HittableList(hittableList, 4); } } __global__ void freeWorld(Camera** camera, Hittable** hittablePtrList, Hittable** hittableWorldObjList) { delete* camera; for (int i = 0; i < 4; ++i) { // delete random texture instances 
delete hittablePtrList[i]->matPtr()->texturePtr(); // delete random material instances delete hittablePtrList[i]->matPtr(); // delete object instances delete hittablePtrList[i]; } delete* hittableWorldObjList; } int main() { /* image config */ constexpr float aspectRatio = 16.0f / 9.0f; constexpr int imageWidth = 400; constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio); constexpr int samplesPerPixel = 100; constexpr int maxDepth = 50; /* image output file */ const std::string fileName("output.png"); /* thread block config */ constexpr int threadBlockWidth = 8; constexpr int threadBlockHeight = 8; // preparation constexpr int channelNum = 3; // rgb constexpr int pixelNum = imageWidth * imageHeight; constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color); constexpr size_t randStateListBytes = pixelNum * sizeof(curandState); // allocate memory for pixel buffer const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes); // allocate random state const auto randStateListPtr = cudaUniquePtr<curandState>(randStateListBytes); // create world of hittable objects and the camera const auto cameraPtr = cudaUniquePtr<Camera*>(sizeof(Camera*)); const auto hittableListPtr = cudaUniquePtr<Hittable*>(4 * sizeof(Hittable*)); const auto hittableWorldObjListPtr = cudaUniquePtr<Hittable*>(sizeof(Hittable*)); createWorld<<<1, 1>>>(cameraPtr.get(), hittableListPtr.get(), hittableWorldObjListPtr.get()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // start timer const clock_t start = clock(); const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1); const dim3 threadDim(threadBlockWidth, threadBlockHeight); // render init renderInit<<<blockDim, threadDim>>>(imageWidth, imageHeight, randStateListPtr.get()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // render the image into buffer render<<<blockDim, threadDim>>>( pixelBufferPtr.get(), imageWidth, imageHeight, cameraPtr.get(), randStateListPtr.get(), samplesPerPixel, maxDepth, hittableWorldObjListPtr.get() ); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // stop timer const clock_t stop = clock(); // measure rendering time const auto renderingMillisecond = stop - start; // other image writer arguments constexpr int imageSize = pixelNum * channelNum; constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char); const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]); // store the pixel data into writing buffer as 8bit color for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) { const Color color = pixelBufferPtr.get()[pixelIdx]; pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit()); pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit()); pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit()); } // print rendering time std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl; // write pixel data to output file stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes); // free world of hittable objects freeWorld<<<1, 1>>>(cameraPtr.get(), hittableListPtr.get(), hittableWorldObjListPtr.get()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); return 0; }
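// ---------------------------------------------------------------------------
// A minimal, hedged sketch (illustration only, not taken from either file) of
// the mechanical rewrite hipify applied between the .cu version above and the
// .hip version of this ray tracer. `exampleKernel`, `grid`, `block`, and
// `stream` are placeholder names.
//
//   // CUDA launch syntax (as in render<<<blockDim, threadDim>>>(...)):
//   exampleKernel<<<grid, block, 0 /*sharedMemBytes*/, stream>>>(devPtr, n);
//
//   // hipify output (as in hipLaunchKernelGGL((render), ...)):
//   hipLaunchKernelGGL(exampleKernel, grid, block, 0 /*sharedMemBytes*/, stream, devPtr, n);
//
// Runtime and library identifiers are renamed one-for-one in the same pass:
// cuda_runtime.h -> hip/hip_runtime.h, curand_kernel.h -> hiprand/hiprand_kernel.h,
// curandState -> hiprandState_t, curand_init -> hiprand_init, and
// cudaGetLastError/cudaDeviceSynchronize -> hipGetLastError/hipDeviceSynchronize,
// which is exactly the difference visible in this pair.
// ---------------------------------------------------------------------------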
1e4c3d15262740bf81903da91d89f40d6187c051.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "math_functions.hpp" namespace minkowski { // CUBLAS, CUSPARSE assume all dense matrices to be col major template <> void gpu_gemm<float>(hipblasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float *A, const float *B, const float beta, float *C) { // Note that cublas follows (column-major) fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void gpu_gemm<double>(hipblasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double *A, const double *B, const double beta, double *C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } // CUBLAS, CUSPARSE assume all dense matrices to be col major // If op(B)=B, cusparse<t>csrmm2() is the same as cusparse<t>csrmm(); // otherwise, only op(A)=A is supported and the matrix type must be // HIPSPARSE_MATRIX_TYPE_GENERAL. 
// M: # row of A // N: # col of op(B) or C // K: # col of A template <> hipsparseStatus_t cusparse_csrmv<float>(hipsparseHandle_t handle, hipsparseOperation_t transA, int m, int n, int nnz, const float *alpha, const hipsparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *x, const float *beta, float *y) { return hipsparseScsrmv(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); }; template <> hipsparseStatus_t cusparse_csrmv<double>(hipsparseHandle_t handle, hipsparseOperation_t transA, int m, int n, int nnz, const double *alpha, const hipsparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *x, const double *beta, double *y) { return hipsparseDcsrmv(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); }; template <> hipsparseStatus_t cusparse_csrmm<float>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const float *alpha, const hipsparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *B, int ldb, const float *beta, float *C, int ldc) { return hipsparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> hipsparseStatus_t cusparse_csrmm<double>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const double *alpha, const hipsparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *B, int ldb, const double *beta, double *C, int ldc) { return hipsparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <typename Dtype> __global__ void addition_kernel(const int n, const Dtype *a, const Dtype *b, Dtype *y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <typename Dtype> __global__ void multiplication_kernel(const int n, const Dtype *a, const Dtype *b, Dtype *y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <typename Dtype> void gpu_addition(const int N, const Dtype *a, const Dtype *b, Dtype *y, hipStream_t stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( addition_kernel<Dtype>) , dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, stream, N, a, b, y); } template void gpu_addition<float>(const int N, const float *a, const float *b, float *y, hipStream_t stream); template void gpu_addition<double>(const int N, const double *a, const double *b, double *y, hipStream_t stream); template <typename Dtype> void gpu_multiplication(const int N, const Dtype *a, const Dtype *b, Dtype *y, hipStream_t stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( multiplication_kernel<Dtype>) , dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, stream, N, a, b, y); } template void gpu_multiplication<float>(const int N, const float *a, const float *b, float *y, hipStream_t stream); template void gpu_multiplication<double>(const int N, const double *a, const double *b, double *y, hipStream_t stream); template <typename Dtype> __global__ void col2row_major_kernel(const int n, const int nrows, const int ncols, const Dtype *colA, Dtype *rowA) { int i, j; CUDA_KERNEL_LOOP(index, n) { i = index % nrows; j = index / nrows; rowA[i * ncols + j] = colA[index]; } } template <typename 
Dtype> void col2row_major(const int nrows, const int ncols, const Dtype *colA, Dtype *rowA, hipStream_t stream) { hipLaunchKernelGGL(( col2row_major_kernel<Dtype>) , dim3(GET_BLOCKS(nrows * ncols)), dim3(CUDA_NUM_THREADS), 0, stream, nrows * ncols, nrows, ncols, colA, rowA); } template void col2row_major<float>(const int nrows, const int ncols, const float *colA, float *rowA, hipStream_t stream); template void col2row_major<double>(const int nrows, const int ncols, const double *colA, double *rowA, hipStream_t stream); template <typename Dtype> __global__ void row2col_major_kernel(const int n, const int nrows, const int ncols, const Dtype *rowA, Dtype *colA) { int i, j; CUDA_KERNEL_LOOP(index, n) { i = index / ncols; j = index % ncols; colA[i + j * nrows] = rowA[index]; } } template <typename Dtype> void row2col_major(const int nrows, const int ncols, const Dtype *colA, Dtype *rowA, hipStream_t stream) { hipLaunchKernelGGL(( row2col_major_kernel<Dtype>) , dim3(GET_BLOCKS(nrows * ncols)), dim3(CUDA_NUM_THREADS), 0, stream, nrows * ncols, nrows, ncols, colA, rowA); } template void row2col_major<float>(const int nrows, const int ncols, const float *colA, float *rowA, hipStream_t stream); template void row2col_major<double>(const int nrows, const int ncols, const double *colA, double *rowA, hipStream_t stream); // Sort (row, col) pairs row-major order. void sort_coo_gpu(hipsparseHandle_t handle, const int m, const int n, const int nnz, int *d_coo_row, int *d_coo_col) { size_t pBufferSizeInBytes = 0; void *pBuffer = NULL; int *P = NULL; // step 1: allocate buffer CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt( handle, m, n, nnz, d_coo_row, d_coo_col, &pBufferSizeInBytes)); CUDA_CHECK(hipMalloc(&pBuffer, sizeof(char) * pBufferSizeInBytes)); // step 2: setup permutation vector P to identity CUDA_CHECK(hipMalloc((void **)&P, sizeof(int) * nnz)); CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(handle, nnz, P)); // step 3: sort COO CUSPARSE_CHECK(hipsparseXcoosortByRow(handle, m, n, nnz, d_coo_row, d_coo_col, P, pBuffer)); hipFree(pBuffer); hipFree(P); } } // end namespace minkowski
1e4c3d15262740bf81903da91d89f40d6187c051.cu
/* Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "math_functions.hpp" namespace minkowski { // CUBLAS, CUSPARSE assume all dense matrices to be col major template <> void gpu_gemm<float>(cublasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float *A, const float *B, const float beta, float *C) { // Note that cublas follows (column-major) fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void gpu_gemm<double>(cublasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double *A, const double *B, const double beta, double *C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } // CUBLAS, CUSPARSE assume all dense matrices to be col major // If op(B)=B, cusparse<t>csrmm2() is the same as cusparse<t>csrmm(); // otherwise, only op(A)=A is supported and the matrix type must be // CUSPARSE_MATRIX_TYPE_GENERAL. 
// M: # row of A // N: # col of op(B) or C // K: # col of A template <> cusparseStatus_t cusparse_csrmv<float>(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int nnz, const float *alpha, const cusparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *x, const float *beta, float *y) { return cusparseScsrmv(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); }; template <> cusparseStatus_t cusparse_csrmv<double>(cusparseHandle_t handle, cusparseOperation_t transA, int m, int n, int nnz, const double *alpha, const cusparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *x, const double *beta, double *y) { return cusparseDcsrmv(handle, transA, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y); }; template <> cusparseStatus_t cusparse_csrmm<float>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const float *alpha, const cusparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *B, int ldb, const float *beta, float *C, int ldc) { return cusparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> cusparseStatus_t cusparse_csrmm<double>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const double *alpha, const cusparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *B, int ldb, const double *beta, double *C, int ldc) { return cusparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <typename Dtype> __global__ void addition_kernel(const int n, const Dtype *a, const Dtype *b, Dtype *y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <typename Dtype> __global__ void multiplication_kernel(const int n, const Dtype *a, const Dtype *b, Dtype *y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <typename Dtype> void gpu_addition(const int N, const Dtype *a, const Dtype *b, Dtype *y, cudaStream_t stream) { // NOLINT_NEXT_LINE(whitespace/operators) addition_kernel<Dtype> <<<GET_BLOCKS(N), CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y); } template void gpu_addition<float>(const int N, const float *a, const float *b, float *y, cudaStream_t stream); template void gpu_addition<double>(const int N, const double *a, const double *b, double *y, cudaStream_t stream); template <typename Dtype> void gpu_multiplication(const int N, const Dtype *a, const Dtype *b, Dtype *y, cudaStream_t stream) { // NOLINT_NEXT_LINE(whitespace/operators) multiplication_kernel<Dtype> <<<GET_BLOCKS(N), CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y); } template void gpu_multiplication<float>(const int N, const float *a, const float *b, float *y, cudaStream_t stream); template void gpu_multiplication<double>(const int N, const double *a, const double *b, double *y, cudaStream_t stream); template <typename Dtype> __global__ void col2row_major_kernel(const int n, const int nrows, const int ncols, const Dtype *colA, Dtype *rowA) { int i, j; CUDA_KERNEL_LOOP(index, n) { i = index % nrows; j = index / nrows; rowA[i * ncols + j] = colA[index]; } } template <typename Dtype> void col2row_major(const int nrows, const int ncols, const Dtype *colA, 
Dtype *rowA, cudaStream_t stream) { col2row_major_kernel<Dtype> <<<GET_BLOCKS(nrows * ncols), CUDA_NUM_THREADS, 0, stream>>>( nrows * ncols, nrows, ncols, colA, rowA); } template void col2row_major<float>(const int nrows, const int ncols, const float *colA, float *rowA, cudaStream_t stream); template void col2row_major<double>(const int nrows, const int ncols, const double *colA, double *rowA, cudaStream_t stream); template <typename Dtype> __global__ void row2col_major_kernel(const int n, const int nrows, const int ncols, const Dtype *rowA, Dtype *colA) { int i, j; CUDA_KERNEL_LOOP(index, n) { i = index / ncols; j = index % ncols; colA[i + j * nrows] = rowA[index]; } } template <typename Dtype> void row2col_major(const int nrows, const int ncols, const Dtype *colA, Dtype *rowA, cudaStream_t stream) { row2col_major_kernel<Dtype> <<<GET_BLOCKS(nrows * ncols), CUDA_NUM_THREADS, 0, stream>>>( nrows * ncols, nrows, ncols, colA, rowA); } template void row2col_major<float>(const int nrows, const int ncols, const float *colA, float *rowA, cudaStream_t stream); template void row2col_major<double>(const int nrows, const int ncols, const double *colA, double *rowA, cudaStream_t stream); // Sort (row, col) pairs row-major order. void sort_coo_gpu(cusparseHandle_t handle, const int m, const int n, const int nnz, int *d_coo_row, int *d_coo_col) { size_t pBufferSizeInBytes = 0; void *pBuffer = NULL; int *P = NULL; // step 1: allocate buffer CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt( handle, m, n, nnz, d_coo_row, d_coo_col, &pBufferSizeInBytes)); CUDA_CHECK(cudaMalloc(&pBuffer, sizeof(char) * pBufferSizeInBytes)); // step 2: setup permutation vector P to identity CUDA_CHECK(cudaMalloc((void **)&P, sizeof(int) * nnz)); CUSPARSE_CHECK(cusparseCreateIdentityPermutation(handle, nnz, P)); // step 3: sort COO CUSPARSE_CHECK(cusparseXcoosortByRow(handle, m, n, nnz, d_coo_row, d_coo_col, P, pBuffer)); cudaFree(pBuffer); cudaFree(P); } } // end namespace minkowski
25690ee6f21d2b1eb26e2b1021a6e9aa582b98fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (i < size*batches) { dst[i] = 0; for (int c = 0; c < channels; ++c) { dst[i] += src[xy + size*(c + channels*b)]; } dst[i] = dst[i] / channels; } }
25690ee6f21d2b1eb26e2b1021a6e9aa582b98fe.cu
#include "includes.h" __global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (i < size*batches) { dst[i] = 0; for (int c = 0; c < channels; ++c) { dst[i] += src[xy + size*(c + channels*b)]; } dst[i] = dst[i] / channels; } }
f0202f7022c0becdf4be1250a7951d99a81321a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 // Lab4: You can use any other block size you wish. #define BLOCK_SIZE 256 // Lab4: Host Helper Functions (allocate your own data structure...) __global__ void scan(float *g_odata, float *g_idata, const int n); void prescanArray(float *outArray, float *inArray, int numElements); // Lab4: Device Functions // Lab4: Kernel Functions // n: block size. one thread can handle two elements __global__ void scan(float *g_odata, float *g_idata, const int n) { extern __shared__ float temp[]; // allocated on invocation: only needs to be as big as num threads in block int thid = threadIdx.x; //thread id in block int gid = blockIdx.x*blockDim.x + thid; //global id int offset = 1; //Loop all elements partitioned to a block in input array //Every thread handles two elements temp[2*thid] = g_idata[2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[2*thid+1]; // load input into shared memory. // This is exclusive scan, so shift right by one and set first element to 0 for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*thid] = temp[2*thid]; // write results to device memory g_odata[2*thid+1] = temp[2*thid+1]; } // **===-------- Lab4: Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray(float *outArray, float *inArray, int numElements) { // Divide input array into blocks // Remember that each thread can handle two elements // BLOCK_SIZE is set above as a constnt // int numBlocks = ceil(numElements/BLOCK_SIZE); // Allocate global device memory for arrays to communicate sum data // hipMalloc( // int nepb = num_elements/gridDim.x; //assuming input array is a power of 2 already dim3 dimGrid(1); dim3 dimBlock(BLOCK_SIZE); //BEV: added all below this point //dim3 dimGrid(numBlocks); //dim3 dimBlock(BLOCK_SIZE); hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock), 2*sizeof(float)*numElements+1, 0, outArray, inArray, numElements); //scan<<<dimGrid, dimBlock, 2*sizeof(float)*numElements+1>>>(outArray, inArray, BLOCK_SIZE); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
f0202f7022c0becdf4be1250a7951d99a81321a3.cu
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 // Lab4: You can use any other block size you wish. #define BLOCK_SIZE 256 // Lab4: Host Helper Functions (allocate your own data structure...) __global__ void scan(float *g_odata, float *g_idata, const int n); void prescanArray(float *outArray, float *inArray, int numElements); // Lab4: Device Functions // Lab4: Kernel Functions // n: block size. one thread can handle two elements __global__ void scan(float *g_odata, float *g_idata, const int n) { extern __shared__ float temp[]; // allocated on invocation: only needs to be as big as num threads in block int thid = threadIdx.x; //thread id in block int gid = blockIdx.x*blockDim.x + thid; //global id int offset = 1; //Loop all elements partitioned to a block in input array //Every thread handles two elements temp[2*thid] = g_idata[2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[2*thid+1]; // load input into shared memory. // This is exclusive scan, so shift right by one and set first element to 0 for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*thid] = temp[2*thid]; // write results to device memory g_odata[2*thid+1] = temp[2*thid+1]; } // **===-------- Lab4: Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray(float *outArray, float *inArray, int numElements) { // Divide input array into blocks // Remember that each thread can handle two elements // BLOCK_SIZE is set above as a constnt // int numBlocks = ceil(numElements/BLOCK_SIZE); // Allocate global device memory for arrays to communicate sum data // cudaMalloc( // int nepb = num_elements/gridDim.x; //assuming input array is a power of 2 already dim3 dimGrid(1); dim3 dimBlock(BLOCK_SIZE); //BEV: added all below this point //dim3 dimGrid(numBlocks); //dim3 dimBlock(BLOCK_SIZE); scan<<<dimGrid, dimBlock, 2*sizeof(float)*numElements+1>>>(outArray, inArray, numElements); //scan<<<dimGrid, dimBlock, 2*sizeof(float)*numElements+1>>>(outArray, inArray, BLOCK_SIZE); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
8272f4e885b9a4bff8242e5bd0904d1d56351d10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/optimizers/adam.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <typename TensorDataType> __global__ void adam_noncontiguous_kernel(size_t height, size_t width, TensorDataType correction, TensorDataType eps, TensorDataType beta1, TensorDataType beta2, TensorDataType * __restrict__ values, size_t values_ldim, const TensorDataType * __restrict__ gradient, size_t gradient_ldim, TensorDataType * __restrict__ moment1, size_t moment1_ldim, TensorDataType * __restrict__ moment2, size_t moment2_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < height * width) { const auto& row = gid % height; const auto& col = gid / height; const auto& g = gradient[row + col * gradient_ldim] + eps; if (cuda::isinf(g) || cuda::isnan(g)) { return; } auto& m1 = moment1[row + col * moment1_ldim]; auto& m2 = moment2[row + col * moment2_ldim]; auto& x = values[row + col * values_ldim]; m1 = beta1 * m1 + (TensorDataType(1) - beta1) * g; m2 = beta2 * m2 + (TensorDataType(1) - beta2) * g * g; x -= correction * m1 / (gpu_lib::sqrt(m2) + eps); } } template <typename TensorDataType> __global__ void adam_contiguous_kernel(size_t size, TensorDataType correction, TensorDataType eps, TensorDataType beta1, TensorDataType beta2, TensorDataType * __restrict__ values, const TensorDataType * __restrict__ gradient, TensorDataType * __restrict__ moment1, TensorDataType * __restrict__ moment2) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < size) { const auto& g = gradient[gid] + eps; if (cuda::isinf(g) || cuda::isnan(g)) { return; } auto& m1 = moment1[gid]; auto& m2 = moment2[gid]; auto& x = values[gid]; m1 = beta1 * m1 + (TensorDataType(1) - beta1) * g; m2 = beta2 * m2 + (TensorDataType(1) - beta2) * g * g; x -= correction * m1 / (gpu_lib::sqrt(m2) + eps); } } } // namespace template <typename TensorDataType> void adam<TensorDataType>::step_compute_gpu(AbsDistMatrixType& values, const AbsDistMatrixType& gradient, const TensorDataType& correction) { // Get matrix dimensions const size_t local_height = values.LocalHeight(); const size_t local_width = values.LocalWidth(); const size_t local_size = local_height * local_width; if (local_size <= 0) { return; } // Launch GPU kernel constexpr size_t 
block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; auto multisync = El::MakeMultiSync(gpu::get_sync_info(values), gpu::get_sync_info(gradient)); if (values.Contiguous() && gradient.Contiguous() && m_moment1->Contiguous() && m_moment2->Contiguous()) { hydrogen::gpu::LaunchKernel( adam_contiguous_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_size, correction, m_eps, m_beta1, m_beta2, values.Buffer(), gradient.LockedBuffer(), m_moment1->Buffer(), m_moment2->Buffer()); } else { hydrogen::gpu::LaunchKernel( adam_noncontiguous_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_height, local_width, correction, m_eps, m_beta1, m_beta2, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), m_moment1->Buffer(), m_moment1->LDim(), m_moment2->Buffer(), m_moment2->LDim()); } } #ifdef LBANN_HAS_HALF template <> void adam<cpu_fp16>::step_compute_gpu(AbsDistMatrixType&, const AbsDistMatrixType&, const cpu_fp16&) { LBANN_ERROR("Can't call this function with cpu_fp16!"); } #endif // LBANN_HAS_HALF #define PROTO(T) \ template void adam<T>::step_compute_gpu( \ El::AbstractDistMatrix<T>&, \ const El::AbstractDistMatrix<T>&, const T&) #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
8272f4e885b9a4bff8242e5bd0904d1d56351d10.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/optimizers/adam.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <typename TensorDataType> __global__ void adam_noncontiguous_kernel(size_t height, size_t width, TensorDataType correction, TensorDataType eps, TensorDataType beta1, TensorDataType beta2, TensorDataType * __restrict__ values, size_t values_ldim, const TensorDataType * __restrict__ gradient, size_t gradient_ldim, TensorDataType * __restrict__ moment1, size_t moment1_ldim, TensorDataType * __restrict__ moment2, size_t moment2_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < height * width) { const auto& row = gid % height; const auto& col = gid / height; const auto& g = gradient[row + col * gradient_ldim] + eps; if (cuda::isinf(g) || cuda::isnan(g)) { return; } auto& m1 = moment1[row + col * moment1_ldim]; auto& m2 = moment2[row + col * moment2_ldim]; auto& x = values[row + col * values_ldim]; m1 = beta1 * m1 + (TensorDataType(1) - beta1) * g; m2 = beta2 * m2 + (TensorDataType(1) - beta2) * g * g; x -= correction * m1 / (gpu_lib::sqrt(m2) + eps); } } template <typename TensorDataType> __global__ void adam_contiguous_kernel(size_t size, TensorDataType correction, TensorDataType eps, TensorDataType beta1, TensorDataType beta2, TensorDataType * __restrict__ values, const TensorDataType * __restrict__ gradient, TensorDataType * __restrict__ moment1, TensorDataType * __restrict__ moment2) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < size) { const auto& g = gradient[gid] + eps; if (cuda::isinf(g) || cuda::isnan(g)) { return; } auto& m1 = moment1[gid]; auto& m2 = moment2[gid]; auto& x = values[gid]; m1 = beta1 * m1 + (TensorDataType(1) - beta1) * g; m2 = beta2 * m2 + (TensorDataType(1) - beta2) * g * g; x -= correction * m1 / (gpu_lib::sqrt(m2) + eps); } } } // namespace template <typename TensorDataType> void adam<TensorDataType>::step_compute_gpu(AbsDistMatrixType& values, const AbsDistMatrixType& gradient, const TensorDataType& correction) { // Get matrix dimensions const size_t local_height = values.LocalHeight(); const size_t local_width = values.LocalWidth(); const size_t local_size = local_height * local_width; if (local_size <= 0) { return; } // Launch GPU kernel constexpr size_t block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; 
auto multisync = El::MakeMultiSync(gpu::get_sync_info(values), gpu::get_sync_info(gradient)); if (values.Contiguous() && gradient.Contiguous() && m_moment1->Contiguous() && m_moment2->Contiguous()) { hydrogen::gpu::LaunchKernel( adam_contiguous_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_size, correction, m_eps, m_beta1, m_beta2, values.Buffer(), gradient.LockedBuffer(), m_moment1->Buffer(), m_moment2->Buffer()); } else { hydrogen::gpu::LaunchKernel( adam_noncontiguous_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_height, local_width, correction, m_eps, m_beta1, m_beta2, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), m_moment1->Buffer(), m_moment1->LDim(), m_moment2->Buffer(), m_moment2->LDim()); } } #ifdef LBANN_HAS_HALF template <> void adam<cpu_fp16>::step_compute_gpu(AbsDistMatrixType&, const AbsDistMatrixType&, const cpu_fp16&) { LBANN_ERROR("Can't call this function with cpu_fp16!"); } #endif // LBANN_HAS_HALF #define PROTO(T) \ template void adam<T>::step_compute_gpu( \ El::AbstractDistMatrix<T>&, \ const El::AbstractDistMatrix<T>&, const T&) #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
NonogramLineDevice.hip
// !!! This is a file automatically generated by hipify!!! // // Created by Benjamin Huang on 11/19/2018. // #include "NonogramLineDevice.h" // #define DEBUG __device__ bool ngline_dev_cell_solve(NonogramLineDevice *L, Board2DDevice *B, NonogramColor color, unsigned i) { if (L->data[i] != color) { unsigned x, y; if (L->line_is_row) { y = L->line_index; x = i; } else { x = L->line_index; y = i; } board2d_dev_elem_set(B, x, y, color); #ifdef DISP mvaddch(y, x, ngramColorToChar(color)); refresh(); #endif return true; } return false; } __device__ void ngline_init_dev(NonogramLineDevice *L) { if (L->constr_len == 0) { return; } // Find the topmost line configuration for each run. unsigned topSum = 0; for (unsigned i = 0; i < L->constr_len; i++) { topSum += L->constr[i]; L->b_runs[i].topEnd = topSum; topSum++; } #ifndef __NVCC__ #ifdef DEBUG printf("topSum=%d\t", topSum); #endif #endif // Find the bottommost line configuration for each run. unsigned botSum = L->len; for (unsigned i = L->constr_len - 1; i < L->constr_len; i--) { botSum -= L->constr[i]; L->b_runs[i].botStart = botSum; botSum--; } #ifndef __NVCC__ #ifdef DEBUG printf("botSum=%d\n", botSum); #endif #endif } __device__ __inline__ bool ngline_dev_run_top_adjust(NonogramLineDevice *L, unsigned char &topEnd, unsigned line_len, unsigned run_len) { if (topEnd < line_len && L->data[topEnd] == NGCOLOR_BLACK) { topEnd++; return true; } for (unsigned i = topEnd; i > topEnd - run_len; i--) { if (L->data[i - 1] == NGCOLOR_WHITE) { topEnd = i + run_len; return true; } } if (topEnd > run_len && L->data[topEnd - run_len - 1] == NGCOLOR_BLACK) { topEnd++; return true; } return false; } __device__ __inline__ bool ngline_dev_run_bot_adjust(NonogramLineDevice *L, unsigned char &botStart, unsigned line_len, unsigned run_len) { if (botStart > 0 && L->data[botStart - 1] == NGCOLOR_BLACK) { botStart--; return true; } for (unsigned i = botStart; i < botStart + run_len; i++) { if (L->data[i] == NGCOLOR_WHITE) { botStart = i - run_len; return true; } } if (botStart + run_len < line_len && L->data[botStart + run_len] == NGCOLOR_BLACK) { botStart--; return true; } return false; } __device__ __inline__ void ngline_dev_run_top_prop(NonogramLineDevice *L) { for (unsigned ri = 1; ri < L->constr_len; ri++) { L->b_runs[ri].topEnd = dev_max(L->b_runs[ri].topEnd, L->b_runs[ri-1].topEnd + L->constr[ri] + 1); } } __device__ __inline__ void ngline_dev_run_bot_prop(NonogramLineDevice *L) { for (unsigned ri = L->constr_len - 2; ri < L->constr_len; ri--) { L->b_runs[ri].botStart = dev_min(L->b_runs[ri].botStart, L->b_runs[ri+1].botStart - L->constr[ri] - 1); } } __device__ __inline__ bool ngline_dev_run_fill_black(NonogramLineDevice *L, Board2DDevice *B, const BRun *R, unsigned run_len) { bool dirty = false; for (unsigned i = R->botStart; i < R->topEnd; i++) { dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, i); } if (R->topEnd == R->botStart + run_len) { if (R->topEnd < L->len) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, R->topEnd); if (R->botStart > 0) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, R->botStart - 1); } return dirty; } __device__ __inline__ bool ngline_dev_run_fill_white(NonogramLineDevice *L, Board2DDevice *B, unsigned ri) { // ri is the index of the black run after the white area bool dirty = false; unsigned prevBotEnd; unsigned topStart; if (ri == 0) { prevBotEnd = 0; } else { prevBotEnd = L->b_runs[ri - 1].botStart + L->constr[ri - 1]; } if (ri == L->constr_len) { topStart = L->len; } else { topStart = L->b_runs[ri].topEnd - 
L->constr[ri]; } for (unsigned i = prevBotEnd; i < topStart; i++) { dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, i); } return dirty; } __device__ bool ngline_dev_run_solve(NonogramLineDevice *L, Board2DDevice *B) { bool dirty = false; unsigned line_len = L->len; // Adjust the possible start and end points of the runs for (unsigned ri = 0; ri < L->constr_len; ri++) { while(ngline_dev_run_top_adjust(L, L->b_runs[ri].topEnd, line_len, L->constr[ri])); while(ngline_dev_run_bot_adjust(L, L->b_runs[ri].botStart, line_len, L->constr[ri])); } // Propagate changes - one thread only! ngline_dev_run_top_prop(L); ngline_dev_run_bot_prop(L); // Fill overlaps for (unsigned ri = 0; ri < L->constr_len; ri++) { dirty |= ngline_dev_run_fill_white(L, B, ri); dirty |= ngline_dev_run_fill_black(L, B, &L->b_runs[ri], L->constr[ri]); } dirty |= ngline_dev_run_fill_white(L, B, L->constr_len); return dirty; } __device__ bool ngline_dev_block_solve(NonogramLineDevice *L, Board2DDevice *B) { unsigned block_topStart = 0; unsigned block_start; unsigned block_end; unsigned block_botEnd = 0; unsigned ri_first = 0; unsigned ri_last = 0; unsigned i = 0; bool solved = true; bool dirty = false; while (i < L->len) { if (L->data[i] == NGCOLOR_UNKNOWN) { // No blocks to solve here i++; solved = false; continue; } if (L->data[i] == NGCOLOR_WHITE) { // White blocks, nothing to solve i++; block_topStart = i; continue; } // At this point we have reached a shaded block block_start = i; while (L->data[i] == NGCOLOR_BLACK) { // Find the end of the shaded block i++; if (i == L->len) break; } block_end = i; block_botEnd = i; // Find the next white block (to determine the maximum possible extent of the shaded block while (block_botEnd < L->len && L->data[block_botEnd] != NGCOLOR_WHITE) block_botEnd++; // Determine the minimum and maximum length of this block unsigned block_len_min = block_end - block_start; unsigned block_len_max = block_botEnd - block_topStart; // The number of runs that will fit this block unsigned run_fit_count = 0; // next three are not valid if run_fit_count == 0 unsigned run_fit_index = 0; // The bottom-most fitting run unsigned run_len_min = L->len; // The minimum length of all the fitting runs unsigned run_len_max = 0; // The maximum length of all the fitting runs // Get the run valid run indexes while (L->b_runs[ri_first].botStart + L->constr[ri_first] < block_end) ri_first++; ri_last = ri_first; while (ri_last < L->constr_len && L->b_runs[ri_last].topEnd <= block_start + L->constr[ri_last]) { unsigned run_len = L->constr[ri_last]; // Check that the run length will fit the block if (block_len_min <= run_len && run_len <= block_len_max) { if (run_fit_count == 0) { // Make the topmost possible run start no later than this run if (L->b_runs[ri_last].botStart > block_start) { L->b_runs[ri_last].botStart = block_start; dirty = true; } run_len_min = run_len; run_len_max = run_len; } else { run_len_min = dev_min(run_len, run_len_min); run_len_max = dev_max(run_len, run_len_max); } run_fit_count++; run_fit_index = ri_last; } ri_last++; } // TODO If checking for contradiction, must check no possible runs // Make the bottommost possible run start no earlier than this run if (L->b_runs[run_fit_index].topEnd < block_end){ L->b_runs[run_fit_index].topEnd = block_end; dirty = true; } while (block_end < block_topStart + run_len_min) { // If the minimum run length puts the last cell in the shortest possible run further right // than the last cell in the block, fill up in between. 
dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, block_end); block_end++; } while (block_start > block_botEnd - run_len_min) { // If the minimum run length puts the first cell in the shortest possible run further left // than the first cell in the block, fill up in between. block_start--; dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, block_start); } if (block_len_min == run_len_max) { // If the block is already the maximum run length, then fill up white around it. if (block_end != L->len) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, block_end); if (block_start != 0) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, block_start - 1); } } if (solved) L->solved = true; return dirty; } __device__ void ngline_dev_mutableonly_copy(NonogramLineDevice *L_dst, const NonogramLineDevice *L_src) { L_dst->solved = L_src->solved; for (unsigned i = 0; i < L_src->constr_len; i++) { L_dst->b_runs[i] = L_src->b_runs[i]; } } bool ng_linearr_init_host(unsigned w, unsigned h, NonogramLineDevice **Ls) { // Allocate the array of line solver structs unsigned Ls_len = w + h; size_t Ls_size = sizeof(NonogramLineDevice) * Ls_len; NonogramLineDevice *Ls_tmp = (NonogramLineDevice *)malloc(Ls_size); if (Ls_tmp == NULL) { fprintf(stderr, "Failed to allocate host line array\n"); return false; } for (unsigned i = 0; i < h; i++) { Ls_tmp[i].constr_len = 0; Ls_tmp[i].line_index = i; Ls_tmp[i].line_is_row = true; Ls_tmp[i].len = w; Ls_tmp[i].solved = false; } for (unsigned i = 0; i < w; i++) { Ls_tmp[i + h].constr_len = 0; Ls_tmp[i + h].line_index = i; Ls_tmp[i + h].line_is_row = false; Ls_tmp[i + h].len = h; Ls_tmp[i + h].solved = false; } *Ls = Ls_tmp; return true; } #ifdef __NVCC__ NonogramLineDevice *ng_linearr_init_dev(unsigned w, unsigned h, NonogramLineDevice *Ls_host) { void *Ls_dev; size_t Ls_size = sizeof(NonogramLineDevice) * (w + h); cudaCheckError(hipMalloc(&Ls_dev, Ls_size)); cudaCheckError(hipMemcpy(Ls_dev, (void *)Ls_host, Ls_size, hipMemcpyHostToDevice)); return (NonogramLineDevice *)Ls_dev; } #endif #ifdef __NVCC__ void ng_linearr_free_dev(NonogramLineDevice *Ls_dev) { cudaCheckError(hipFree(Ls_dev)); } #endif NonogramLineDevice *ng_linearr_deepcopy_host(NonogramLineDevice *Ls, unsigned w, unsigned h) { unsigned Ls_len = w + h; size_t Ls_size = sizeof(NonogramLineDevice) * Ls_len; NonogramLineDevice *Ls_copy = (NonogramLineDevice *)malloc(Ls_size); if (Ls_copy == NULL) { fprintf(stderr, "Failed to allocate copy of line array\n"); return NULL; } memcpy((void *)Ls_copy, (void *)Ls, Ls_size); return Ls_copy; } #ifdef __NVCC__ NonogramLineDevice *ng_linearr_deepcopy_dev_double(NonogramLineDevice *Ls, unsigned Ls_size) { NonogramLineDevice *Ls_dcopy; cudaCheckError(hipMalloc((void **)&Ls_dcopy, 2 * Ls_size)); cudaCheckError(hipMemcpy((void *)Ls_dcopy, (void *)Ls, Ls_size, hipMemcpyDeviceToDevice)); char *Ls_copy2 = ((char *) Ls_dcopy) + Ls_size; cudaCheckError(hipMemcpy((void *)Ls_copy2, (void *)Ls, Ls_size, hipMemcpyDeviceToDevice)); return Ls_dcopy; } #endif void ng_linearr_board_change(NonogramLineDevice *Ls, Board2DDevice *B) { for (unsigned i = 0; i < B->h; i++) { Ls[i].data = board2d_host_row_ptr_get(B, i); } for (unsigned i = 0; i < B->w; i++) { Ls[i + B->h].data = board2d_host_col_ptr_get(B, i); } }
NonogramLineDevice.cu
// // Created by Benjamin Huang on 11/19/2018. // #include "NonogramLineDevice.h" // #define DEBUG __device__ bool ngline_dev_cell_solve(NonogramLineDevice *L, Board2DDevice *B, NonogramColor color, unsigned i) { if (L->data[i] != color) { unsigned x, y; if (L->line_is_row) { y = L->line_index; x = i; } else { x = L->line_index; y = i; } board2d_dev_elem_set(B, x, y, color); #ifdef DISP mvaddch(y, x, ngramColorToChar(color)); refresh(); #endif return true; } return false; } __device__ void ngline_init_dev(NonogramLineDevice *L) { if (L->constr_len == 0) { return; } // Find the topmost line configuration for each run. unsigned topSum = 0; for (unsigned i = 0; i < L->constr_len; i++) { topSum += L->constr[i]; L->b_runs[i].topEnd = topSum; topSum++; } #ifndef __NVCC__ #ifdef DEBUG printf("topSum=%d\t", topSum); #endif #endif // Find the bottommost line configuration for each run. unsigned botSum = L->len; for (unsigned i = L->constr_len - 1; i < L->constr_len; i--) { botSum -= L->constr[i]; L->b_runs[i].botStart = botSum; botSum--; } #ifndef __NVCC__ #ifdef DEBUG printf("botSum=%d\n", botSum); #endif #endif } __device__ __inline__ bool ngline_dev_run_top_adjust(NonogramLineDevice *L, unsigned char &topEnd, unsigned line_len, unsigned run_len) { if (topEnd < line_len && L->data[topEnd] == NGCOLOR_BLACK) { topEnd++; return true; } for (unsigned i = topEnd; i > topEnd - run_len; i--) { if (L->data[i - 1] == NGCOLOR_WHITE) { topEnd = i + run_len; return true; } } if (topEnd > run_len && L->data[topEnd - run_len - 1] == NGCOLOR_BLACK) { topEnd++; return true; } return false; } __device__ __inline__ bool ngline_dev_run_bot_adjust(NonogramLineDevice *L, unsigned char &botStart, unsigned line_len, unsigned run_len) { if (botStart > 0 && L->data[botStart - 1] == NGCOLOR_BLACK) { botStart--; return true; } for (unsigned i = botStart; i < botStart + run_len; i++) { if (L->data[i] == NGCOLOR_WHITE) { botStart = i - run_len; return true; } } if (botStart + run_len < line_len && L->data[botStart + run_len] == NGCOLOR_BLACK) { botStart--; return true; } return false; } __device__ __inline__ void ngline_dev_run_top_prop(NonogramLineDevice *L) { for (unsigned ri = 1; ri < L->constr_len; ri++) { L->b_runs[ri].topEnd = dev_max(L->b_runs[ri].topEnd, L->b_runs[ri-1].topEnd + L->constr[ri] + 1); } } __device__ __inline__ void ngline_dev_run_bot_prop(NonogramLineDevice *L) { for (unsigned ri = L->constr_len - 2; ri < L->constr_len; ri--) { L->b_runs[ri].botStart = dev_min(L->b_runs[ri].botStart, L->b_runs[ri+1].botStart - L->constr[ri] - 1); } } __device__ __inline__ bool ngline_dev_run_fill_black(NonogramLineDevice *L, Board2DDevice *B, const BRun *R, unsigned run_len) { bool dirty = false; for (unsigned i = R->botStart; i < R->topEnd; i++) { dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, i); } if (R->topEnd == R->botStart + run_len) { if (R->topEnd < L->len) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, R->topEnd); if (R->botStart > 0) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, R->botStart - 1); } return dirty; } __device__ __inline__ bool ngline_dev_run_fill_white(NonogramLineDevice *L, Board2DDevice *B, unsigned ri) { // ri is the index of the black run after the white area bool dirty = false; unsigned prevBotEnd; unsigned topStart; if (ri == 0) { prevBotEnd = 0; } else { prevBotEnd = L->b_runs[ri - 1].botStart + L->constr[ri - 1]; } if (ri == L->constr_len) { topStart = L->len; } else { topStart = L->b_runs[ri].topEnd - L->constr[ri]; } for (unsigned i = prevBotEnd; i < topStart; i++) { 
dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, i); } return dirty; } __device__ bool ngline_dev_run_solve(NonogramLineDevice *L, Board2DDevice *B) { bool dirty = false; unsigned line_len = L->len; // Adjust the possible start and end points of the runs for (unsigned ri = 0; ri < L->constr_len; ri++) { while(ngline_dev_run_top_adjust(L, L->b_runs[ri].topEnd, line_len, L->constr[ri])); while(ngline_dev_run_bot_adjust(L, L->b_runs[ri].botStart, line_len, L->constr[ri])); } // Propagate changes - one thread only! ngline_dev_run_top_prop(L); ngline_dev_run_bot_prop(L); // Fill overlaps for (unsigned ri = 0; ri < L->constr_len; ri++) { dirty |= ngline_dev_run_fill_white(L, B, ri); dirty |= ngline_dev_run_fill_black(L, B, &L->b_runs[ri], L->constr[ri]); } dirty |= ngline_dev_run_fill_white(L, B, L->constr_len); return dirty; } __device__ bool ngline_dev_block_solve(NonogramLineDevice *L, Board2DDevice *B) { unsigned block_topStart = 0; unsigned block_start; unsigned block_end; unsigned block_botEnd = 0; unsigned ri_first = 0; unsigned ri_last = 0; unsigned i = 0; bool solved = true; bool dirty = false; while (i < L->len) { if (L->data[i] == NGCOLOR_UNKNOWN) { // No blocks to solve here i++; solved = false; continue; } if (L->data[i] == NGCOLOR_WHITE) { // White blocks, nothing to solve i++; block_topStart = i; continue; } // At this point we have reached a shaded block block_start = i; while (L->data[i] == NGCOLOR_BLACK) { // Find the end of the shaded block i++; if (i == L->len) break; } block_end = i; block_botEnd = i; // Find the next white block (to determine the maximum possible extent of the shaded block while (block_botEnd < L->len && L->data[block_botEnd] != NGCOLOR_WHITE) block_botEnd++; // Determine the minimum and maximum length of this block unsigned block_len_min = block_end - block_start; unsigned block_len_max = block_botEnd - block_topStart; // The number of runs that will fit this block unsigned run_fit_count = 0; // next three are not valid if run_fit_count == 0 unsigned run_fit_index = 0; // The bottom-most fitting run unsigned run_len_min = L->len; // The minimum length of all the fitting runs unsigned run_len_max = 0; // The maximum length of all the fitting runs // Get the run valid run indexes while (L->b_runs[ri_first].botStart + L->constr[ri_first] < block_end) ri_first++; ri_last = ri_first; while (ri_last < L->constr_len && L->b_runs[ri_last].topEnd <= block_start + L->constr[ri_last]) { unsigned run_len = L->constr[ri_last]; // Check that the run length will fit the block if (block_len_min <= run_len && run_len <= block_len_max) { if (run_fit_count == 0) { // Make the topmost possible run start no later than this run if (L->b_runs[ri_last].botStart > block_start) { L->b_runs[ri_last].botStart = block_start; dirty = true; } run_len_min = run_len; run_len_max = run_len; } else { run_len_min = dev_min(run_len, run_len_min); run_len_max = dev_max(run_len, run_len_max); } run_fit_count++; run_fit_index = ri_last; } ri_last++; } // TODO If checking for contradiction, must check no possible runs // Make the bottommost possible run start no earlier than this run if (L->b_runs[run_fit_index].topEnd < block_end){ L->b_runs[run_fit_index].topEnd = block_end; dirty = true; } while (block_end < block_topStart + run_len_min) { // If the minimum run length puts the last cell in the shortest possible run further right // than the last cell in the block, fill up in between. 
dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, block_end); block_end++; } while (block_start > block_botEnd - run_len_min) { // If the minimum run length puts the first cell in the shortest possible run further left // than the first cell in the block, fill up in between. block_start--; dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_BLACK, block_start); } if (block_len_min == run_len_max) { // If the block is already the maximum run length, then fill up white around it. if (block_end != L->len) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, block_end); if (block_start != 0) dirty |= ngline_dev_cell_solve(L, B, NGCOLOR_WHITE, block_start - 1); } } if (solved) L->solved = true; return dirty; } __device__ void ngline_dev_mutableonly_copy(NonogramLineDevice *L_dst, const NonogramLineDevice *L_src) { L_dst->solved = L_src->solved; for (unsigned i = 0; i < L_src->constr_len; i++) { L_dst->b_runs[i] = L_src->b_runs[i]; } } bool ng_linearr_init_host(unsigned w, unsigned h, NonogramLineDevice **Ls) { // Allocate the array of line solver structs unsigned Ls_len = w + h; size_t Ls_size = sizeof(NonogramLineDevice) * Ls_len; NonogramLineDevice *Ls_tmp = (NonogramLineDevice *)malloc(Ls_size); if (Ls_tmp == NULL) { fprintf(stderr, "Failed to allocate host line array\n"); return false; } for (unsigned i = 0; i < h; i++) { Ls_tmp[i].constr_len = 0; Ls_tmp[i].line_index = i; Ls_tmp[i].line_is_row = true; Ls_tmp[i].len = w; Ls_tmp[i].solved = false; } for (unsigned i = 0; i < w; i++) { Ls_tmp[i + h].constr_len = 0; Ls_tmp[i + h].line_index = i; Ls_tmp[i + h].line_is_row = false; Ls_tmp[i + h].len = h; Ls_tmp[i + h].solved = false; } *Ls = Ls_tmp; return true; } #ifdef __NVCC__ NonogramLineDevice *ng_linearr_init_dev(unsigned w, unsigned h, NonogramLineDevice *Ls_host) { void *Ls_dev; size_t Ls_size = sizeof(NonogramLineDevice) * (w + h); cudaCheckError(cudaMalloc(&Ls_dev, Ls_size)); cudaCheckError(cudaMemcpy(Ls_dev, (void *)Ls_host, Ls_size, cudaMemcpyHostToDevice)); return (NonogramLineDevice *)Ls_dev; } #endif #ifdef __NVCC__ void ng_linearr_free_dev(NonogramLineDevice *Ls_dev) { cudaCheckError(cudaFree(Ls_dev)); } #endif NonogramLineDevice *ng_linearr_deepcopy_host(NonogramLineDevice *Ls, unsigned w, unsigned h) { unsigned Ls_len = w + h; size_t Ls_size = sizeof(NonogramLineDevice) * Ls_len; NonogramLineDevice *Ls_copy = (NonogramLineDevice *)malloc(Ls_size); if (Ls_copy == NULL) { fprintf(stderr, "Failed to allocate copy of line array\n"); return NULL; } memcpy((void *)Ls_copy, (void *)Ls, Ls_size); return Ls_copy; } #ifdef __NVCC__ NonogramLineDevice *ng_linearr_deepcopy_dev_double(NonogramLineDevice *Ls, unsigned Ls_size) { NonogramLineDevice *Ls_dcopy; cudaCheckError(cudaMalloc((void **)&Ls_dcopy, 2 * Ls_size)); cudaCheckError(cudaMemcpy((void *)Ls_dcopy, (void *)Ls, Ls_size, cudaMemcpyDeviceToDevice)); char *Ls_copy2 = ((char *) Ls_dcopy) + Ls_size; cudaCheckError(cudaMemcpy((void *)Ls_copy2, (void *)Ls, Ls_size, cudaMemcpyDeviceToDevice)); return Ls_dcopy; } #endif void ng_linearr_board_change(NonogramLineDevice *Ls, Board2DDevice *B) { for (unsigned i = 0; i < B->h; i++) { Ls[i].data = board2d_host_row_ptr_get(B, i); } for (unsigned i = 0; i < B->w; i++) { Ls[i + B->h].data = board2d_host_col_ptr_get(B, i); } }
2103a118e2699f16aa734aac2af64c930ea502a6.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This example implements matrix element-wise addition on the host and GPU. * sumMatrixOnHost iterates over the rows and columns of each matrix, adding * elements from A and B together and storing the results in C. The current * offset in each matrix is stored using pointer arithmetic. sumMatrixOnGPU2D * implements the same logic, but using CUDA threads to process each matrix. */ void initialData(float *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = (float)( rand() & 0xFF ) / 10.0f; } } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { printf("host %f gpu %f ", hostRef[i], gpuRef[i]); printf("Arrays do not match.\n\n"); break; } } } // grid 2D block 2D __global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int NX, int NY) { unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * NX + ix; if (ix < NX && iy < NY) { C[idx] = A[idx] + B[idx]; } } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); CHECK(hipSetDevice(dev)); // set up data size of matrix int nx = 1 << 13; int ny = 1 << 13; int nxy = nx * ny; int nBytes = nxy * sizeof(float); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); // initialize data at host side double iStart = seconds(); initialData(h_A, nxy); initialData(h_B, nxy); double iElaps = seconds() - iStart; memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result checks iStart = seconds(); sumMatrixOnHost (h_A, h_B, hostRef, nx, ny); iElaps = seconds() - iStart; // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; CHECK(hipMalloc((void **)&d_MatA, nBytes)); CHECK(hipMalloc((void **)&d_MatB, nBytes)); CHECK(hipMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; int dimy = 32; if(argc > 2) { dimx = atoi(argv[1]); dimy = atoi(argv[2]); } dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // execute the kernel CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, iElaps); CHECK(hipGetLastError()); // copy kernel result back to host side CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory CHECK(hipFree(d_MatA)); CHECK(hipFree(d_MatB)); CHECK(hipFree(d_MatC)); // free host memory free(h_A); free(h_B); 
free(hostRef); free(gpuRef); // reset device CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
2103a118e2699f16aa734aac2af64c930ea502a6.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This example implements matrix element-wise addition on the host and GPU. * sumMatrixOnHost iterates over the rows and columns of each matrix, adding * elements from A and B together and storing the results in C. The current * offset in each matrix is stored using pointer arithmetic. sumMatrixOnGPU2D * implements the same logic, but using CUDA threads to process each matrix. */ void initialData(float *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = (float)( rand() & 0xFF ) / 10.0f; } } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { printf("host %f gpu %f ", hostRef[i], gpuRef[i]); printf("Arrays do not match.\n\n"); break; } } } // grid 2D block 2D __global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int NX, int NY) { unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * NX + ix; if (ix < NX && iy < NY) { C[idx] = A[idx] + B[idx]; } } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); CHECK(cudaSetDevice(dev)); // set up data size of matrix int nx = 1 << 13; int ny = 1 << 13; int nxy = nx * ny; int nBytes = nxy * sizeof(float); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); // initialize data at host side double iStart = seconds(); initialData(h_A, nxy); initialData(h_B, nxy); double iElaps = seconds() - iStart; memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result checks iStart = seconds(); sumMatrixOnHost (h_A, h_B, hostRef, nx, ny); iElaps = seconds() - iStart; // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; CHECK(cudaMalloc((void **)&d_MatA, nBytes)); CHECK(cudaMalloc((void **)&d_MatB, nBytes)); CHECK(cudaMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; int dimy = 32; if(argc > 2) { dimx = atoi(argv[1]); dimy = atoi(argv[2]); } dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // execute the kernel CHECK(cudaDeviceSynchronize()); iStart = seconds(); sumMatrixOnGPU2D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, iElaps); CHECK(cudaGetLastError()); // copy kernel result back to host side CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory CHECK(cudaFree(d_MatA)); CHECK(cudaFree(d_MatB)); CHECK(cudaFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
66352e52af228599a9bea70a83515f605ae17eb6.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <iostream> #include <cstdio> #include <fstream> #include "utils.h" #include "timer.h" #include <cstdio> #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) #include <Windows.h> #else #include <sys/time.h> #endif #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/normal_distribution.h> #include <thrust/random/uniform_int_distribution.h> #include "reference_calc.h" void computeHistogram(const unsigned int *const d_vals, unsigned int* const d_histo, const unsigned int numBins, const unsigned int numElems); int main(void) { const unsigned int numBins = 1024; const unsigned int numElems = 10000 * numBins; const float stddev = 100.f; unsigned int *vals = new unsigned int[numElems]; unsigned int *h_vals = new unsigned int[numElems]; unsigned int *h_studentHisto = new unsigned int[numBins]; unsigned int *h_refHisto = new unsigned int[numBins]; #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) srand(GetTickCount()); #else timeval tv; gettimeofday(&tv, NULL); srand(tv.tv_usec); #endif //make the mean unpredictable, but close enough to the middle //so that timings are unaffected unsigned int mean = rand() % 100 + 462; //Output mean so that grading can happen with the same inputs std::cout << mean << std::endl; thrust::minstd_rand rng; thrust::random::normal_distribution<float> normalDist((float)mean, stddev); // Generate the random values for (size_t i = 0; i < numElems; ++i) { vals[i] = ::min((unsigned int) ::max((int)normalDist(rng), 0), numBins - 1); } unsigned int *d_vals, *d_histo; GpuTimer timer; checkCudaErrors(hipMalloc(&d_vals, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, hipMemcpyHostToDevice)); timer.Start(); computeHistogram(d_vals, d_histo, numBins, numElems); timer.Stop(); int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } // copy the student-computed histogram back to the host checkCudaErrors(hipMemcpy(h_studentHisto, d_histo, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost)); //generate reference for the given mean reference_calculation(vals, h_refHisto, numBins, numElems); //Now do the comparison checkResultsExact(h_refHisto, h_studentHisto, numBins); delete[] h_vals; delete[] h_refHisto; delete[] h_studentHisto; hipFree(d_vals); hipFree(d_histo); return 0; }
66352e52af228599a9bea70a83515f605ae17eb6.cu
#include <cstdlib> #include <iostream> #include <cstdio> #include <fstream> #include "utils.h" #include "timer.h" #include <cstdio> #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) #include <Windows.h> #else #include <sys/time.h> #endif #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/normal_distribution.h> #include <thrust/random/uniform_int_distribution.h> #include "reference_calc.h" void computeHistogram(const unsigned int *const d_vals, unsigned int* const d_histo, const unsigned int numBins, const unsigned int numElems); int main(void) { const unsigned int numBins = 1024; const unsigned int numElems = 10000 * numBins; const float stddev = 100.f; unsigned int *vals = new unsigned int[numElems]; unsigned int *h_vals = new unsigned int[numElems]; unsigned int *h_studentHisto = new unsigned int[numBins]; unsigned int *h_refHisto = new unsigned int[numBins]; #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) srand(GetTickCount()); #else timeval tv; gettimeofday(&tv, NULL); srand(tv.tv_usec); #endif //make the mean unpredictable, but close enough to the middle //so that timings are unaffected unsigned int mean = rand() % 100 + 462; //Output mean so that grading can happen with the same inputs std::cout << mean << std::endl; thrust::minstd_rand rng; thrust::random::normal_distribution<float> normalDist((float)mean, stddev); // Generate the random values for (size_t i = 0; i < numElems; ++i) { vals[i] = std::min((unsigned int) std::max((int)normalDist(rng), 0), numBins - 1); } unsigned int *d_vals, *d_histo; GpuTimer timer; checkCudaErrors(cudaMalloc(&d_vals, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, cudaMemcpyHostToDevice)); timer.Start(); computeHistogram(d_vals, d_histo, numBins, numElems); timer.Stop(); int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } // copy the student-computed histogram back to the host checkCudaErrors(cudaMemcpy(h_studentHisto, d_histo, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost)); //generate reference for the given mean reference_calculation(vals, h_refHisto, numBins, numElems); //Now do the comparison checkResultsExact(h_refHisto, h_studentHisto, numBins); delete[] h_vals; delete[] h_refHisto; delete[] h_studentHisto; cudaFree(d_vals); cudaFree(d_histo); return 0; }
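The harness above only declares computeHistogram; the actual implementation lives in a separate file that is not part of this pair. As a hedged illustration (an assumption, not the original implementation), a minimal baseline that satisfies the declared signature can issue one global atomicAdd per input element; it relies on the fact, visible above, that the generator already clamps every value into [0, numBins - 1].

// Sketch only: a baseline histogram matching the signature declared above.
// One thread per input element, one global atomic increment per element.
__global__ void naiveHistoKernel(const unsigned int *d_vals,
                                 unsigned int *d_histo,
                                 unsigned int numElems)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numElems) {
        // d_vals[i] is already in [0, numBins - 1], so no bounds check is needed here.
        atomicAdd(&d_histo[d_vals[i]], 1u);
    }
}

void computeHistogram(const unsigned int *const d_vals,
                      unsigned int *const d_histo,
                      const unsigned int numBins,
                      const unsigned int numElems)
{
    (void)numBins;  // unused in this naive version
    const unsigned int block = 256;
    const unsigned int grid = (numElems + block - 1) / block;
    naiveHistoKernel<<<grid, block>>>(d_vals, d_histo, numElems);
    cudaDeviceSynchronize();
}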
fd23188c832aa1e0e068a16b6f8c2cd6166f68f0.hip
// !!! This is a file automatically generated by hipify!!!
#include "includes.h"

__global__ void min_output(float *input, float *output, float *indices, long nrows, long ncols)
{
  // output offset:
  long o = threadIdx.x + blockDim.x * blockIdx.x;
  if (o >= nrows) return;

  // input offset:
  long i = o * ncols;

  // move pointers
  input = input + i;

  // compute min:
  float min = input[0];
  long argmin = 0;
  long ii;
  for (ii=1; ii<ncols; ii++) {
    float val = input[ii];
    if (val < min) {
      min = val;
      argmin = ii;
    }
  }

  // store
  output[o] = min;
  indices[o] = argmin+1;
}
fd23188c832aa1e0e068a16b6f8c2cd6166f68f0.cu
#include "includes.h"

__global__ void min_output(float *input, float *output, float *indices, long nrows, long ncols)
{
  // output offset:
  long o = threadIdx.x + blockDim.x * blockIdx.x;
  if (o >= nrows) return;

  // input offset:
  long i = o * ncols;

  // move pointers
  input = input + i;

  // compute min:
  float min = input[0];
  long argmin = 0;
  long ii;
  for (ii=1; ii<ncols; ii++) {
    float val = input[ii];
    if (val < min) {
      min = val;
      argmin = ii;
    }
  }

  // store
  output[o] = min;
  indices[o] = argmin+1;
}
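min_output assigns one thread per row and scans the row sequentially, writing the row minimum and a 1-based argmin (the +1 matches Torch-style indexing). The file contains no launcher; the snippet below is a hedged sketch of how a host-side launch might look, with the 256-thread block size being an arbitrary choice rather than anything taken from the original code.

// Sketch only: host-side launch for the row-wise min kernel above.
// d_input is nrows x ncols in row-major order; d_output and d_indices hold nrows floats each.
void launch_min_output(float *d_input, float *d_output, float *d_indices,
                       long nrows, long ncols)
{
    const long block = 256;                        // arbitrary block size (assumption)
    const long grid = (nrows + block - 1) / block; // one thread per row
    min_output<<<(unsigned int)grid, (unsigned int)block>>>(d_input, d_output, d_indices,
                                                            nrows, ncols);
}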
d5cfe5c0b2e9a2bef3855057575e419f0ae8f850.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <algorithm> #include <climits> #include "utils.h" #include "reference.cpp" #define TPB 1024 __device__ unsigned int sum_reduce_warp(unsigned int * s_buf) { unsigned int i = threadIdx.x; if ((i&0x1) == 0) s_buf[i] += s_buf[i|0x1]; if ((i&0x3) == 0) s_buf[i] += s_buf[i|0x2]; if ((i&0x7) == 0) s_buf[i] += s_buf[i|0x4]; if ((i&0xf) == 0) s_buf[i] += s_buf[i|0x8]; unsigned int k = i & ~0x1f; return s_buf[k] + s_buf[k|0x10]; } __device__ unsigned int sum_reduce_block(unsigned int * s_buf) { assert(blockDim.x <= 1024); unsigned int sum_warp = sum_reduce_warp(s_buf); __shared__ unsigned int s_sums[1024]; unsigned int i = threadIdx.x; if (i<32 && i>=(blockDim.x>>5)) s_sums[i] = 0; if ((i&0x1f) == 0) s_sums[i>>5] = sum_warp; __syncthreads(); // only valid in 1st warp of a block return sum_reduce_warp(s_sums); } __global__ void sum_reduce_block_kernel(unsigned int const * d_in, unsigned int * d_out, unsigned int n) { assert(blockDim.x <= 1024); __shared__ unsigned int s_vals[1024]; unsigned int const i = threadIdx.x; unsigned int const j = threadIdx.x + blockIdx.x * blockDim.x; s_vals[i] = 0; if (j < n) s_vals[i] = d_in[j]; unsigned int const sum = sum_reduce_block(s_vals); if (threadIdx.x == 0) { d_out[blockIdx.x] = sum; } } #define N 2048 #define NBLOCKS 2 void test_reduce_block() { unsigned int h_in[N], h_out[NBLOCKS]; std::srand(0); for (unsigned int i = 0; i < N; ++i) h_in[i] = std::rand() & 0x1; unsigned int * d_in, * d_out; unsigned int bufsz = N*sizeof(unsigned int); unsigned int outsz = NBLOCKS*sizeof(unsigned int); checkCudaErrors(hipMalloc((void**)&d_in, bufsz)); checkCudaErrors(hipMalloc((void**)&d_out, outsz)); checkCudaErrors(hipMemcpy(d_in, h_in, bufsz, hipMemcpyHostToDevice)); unsigned int const bsz = TPB; unsigned int const nblocks = N / bsz + (N%bsz != 0); hipLaunchKernelGGL(( sum_reduce_block_kernel), dim3(nblocks),dim3(bsz), 0, 0, d_in, d_out, N); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_out, d_out, outsz, hipMemcpyDeviceToHost)); hipFree(d_in); hipFree(d_out); unsigned int * p = h_in; for (int i = 0; i < nblocks; ++i) { unsigned int sum = 0; for (int j = 0; j < TPB && p < h_in + N; ++j) { sum += *p++; } printf("block %d: ", i); if (sum == h_out[i]) { printf("PASS\n"); } else { printf("FAIL! expected: %u, got: %u\n", sum, h_out[i]); } } /* printf("input:\n"); for (int i = 0; i < N; ++i) { printf("%d: %u\n", i, h_in[i]); } printf("output:\n"); for (int i = 0; i < NBLOCKS; ++i) { printf("%d: %u\n", i, h_out[i]); } */ }
d5cfe5c0b2e9a2bef3855057575e419f0ae8f850.cu
#include <cstdio> #include <cstdlib> #include <algorithm> #include <climits> #include "utils.h" #include "reference.cpp" #define TPB 1024 __device__ unsigned int sum_reduce_warp(unsigned int * s_buf) { unsigned int i = threadIdx.x; if ((i&0x1) == 0) s_buf[i] += s_buf[i|0x1]; if ((i&0x3) == 0) s_buf[i] += s_buf[i|0x2]; if ((i&0x7) == 0) s_buf[i] += s_buf[i|0x4]; if ((i&0xf) == 0) s_buf[i] += s_buf[i|0x8]; unsigned int k = i & ~0x1f; return s_buf[k] + s_buf[k|0x10]; } __device__ unsigned int sum_reduce_block(unsigned int * s_buf) { assert(blockDim.x <= 1024); unsigned int sum_warp = sum_reduce_warp(s_buf); __shared__ unsigned int s_sums[1024]; unsigned int i = threadIdx.x; if (i<32 && i>=(blockDim.x>>5)) s_sums[i] = 0; if ((i&0x1f) == 0) s_sums[i>>5] = sum_warp; __syncthreads(); // only valid in 1st warp of a block return sum_reduce_warp(s_sums); } __global__ void sum_reduce_block_kernel(unsigned int const * d_in, unsigned int * d_out, unsigned int n) { assert(blockDim.x <= 1024); __shared__ unsigned int s_vals[1024]; unsigned int const i = threadIdx.x; unsigned int const j = threadIdx.x + blockIdx.x * blockDim.x; s_vals[i] = 0; if (j < n) s_vals[i] = d_in[j]; unsigned int const sum = sum_reduce_block(s_vals); if (threadIdx.x == 0) { d_out[blockIdx.x] = sum; } } #define N 2048 #define NBLOCKS 2 void test_reduce_block() { unsigned int h_in[N], h_out[NBLOCKS]; std::srand(0); for (unsigned int i = 0; i < N; ++i) h_in[i] = std::rand() & 0x1; unsigned int * d_in, * d_out; unsigned int bufsz = N*sizeof(unsigned int); unsigned int outsz = NBLOCKS*sizeof(unsigned int); checkCudaErrors(cudaMalloc((void**)&d_in, bufsz)); checkCudaErrors(cudaMalloc((void**)&d_out, outsz)); checkCudaErrors(cudaMemcpy(d_in, h_in, bufsz, cudaMemcpyHostToDevice)); unsigned int const bsz = TPB; unsigned int const nblocks = N / bsz + (N%bsz != 0); sum_reduce_block_kernel<<<nblocks,bsz>>>(d_in, d_out, N); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_out, d_out, outsz, cudaMemcpyDeviceToHost)); cudaFree(d_in); cudaFree(d_out); unsigned int * p = h_in; for (int i = 0; i < nblocks; ++i) { unsigned int sum = 0; for (int j = 0; j < TPB && p < h_in + N; ++j) { sum += *p++; } printf("block %d: ", i); if (sum == h_out[i]) { printf("PASS\n"); } else { printf("FAIL! expected: %u, got: %u\n", sum, h_out[i]); } } /* printf("input:\n"); for (int i = 0; i < N; ++i) { printf("%d: %u\n", i, h_in[i]); } printf("output:\n"); for (int i = 0; i < NBLOCKS; ++i) { printf("%d: %u\n", i, h_out[i]); } */ }
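sum_reduce_warp relies on implicitly warp-synchronous shared-memory updates; on Volta and later architectures, independent thread scheduling means such patterns generally need __syncwarp() between steps (or a switch to the __shfl_*_sync intrinsics) to remain correct. As a hedged alternative sketch (not part of the original file, requires CUDA 9 or newer), the same per-warp sum can be computed entirely in registers with butterfly shuffles, leaving the result in every lane just like the shared-memory version above.

// Sketch only: register-based warp sum using __shfl_xor_sync (CUDA 9+).
// Every lane ends up holding the sum over all 32 lanes, matching sum_reduce_warp's behaviour.
__device__ unsigned int sum_reduce_warp_shfl(unsigned int val)
{
    for (int offset = 16; offset > 0; offset >>= 1) {
        val += __shfl_xor_sync(0xffffffffu, val, offset);
    }
    return val;
}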
11d170ed032b586d4a3c869cb50409140227d58b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> // For CUDA runtime API #include <helper_cuda.h> // For checkCudaError macro #include <helper_timer.h> // For CUDA SDK timers #include "wtime.h" #include "mmio.h" #include "matrixPreprocessing.h" #include "test.h" #include "matrixVector.h" //Simple dimension: define a 1D block structure #define BD 256 const dim3 BLOCK_DIM(BD); // Generates a random vector void generateVector(int vectorSize, double* vector) { srand(12345); for (int row = 0; row < vectorSize; ++row) { vector[row] = 100.0f * ((double)rand()) / RAND_MAX; } } // Generates a random matrix void generateMatrix(int rows, int cols, double* matrix) { srand(21345); int idx; for (int row = 0; row < rows; ++row) { for (int col = 0; col < cols; ++col) { idx = row * cols + col; matrix[idx] = 100.0f * ((double)rand()) / RAND_MAX; } } } int main(int argc, char** argv) { testMatrixProcessing(); testMatrixVectorProduct(); char* fileName; if (argc < 2) { fprintf(stderr, "Usage: %s [martix-market-filename]\nWe will be using the cage4 matrix as default\n", argv[0]); //fileName = "D:\\Cranfield work\\Small Scale Parallel Programming\\matrices\\cage4.mtx"; fileName = "../matrices/cage4.mtx"; } else { fileName = argv[1]; } // We read the file of the matrix int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; int i, *I, *J; double *val; // If the file of the matrix cannot be opened if ((f = fopen(fileName, "r")) == NULL) fprintf(stdout, "The file %s could not be opened\n", fileName); else if (mm_read_banner(f, &matcode) != 0) fprintf(stdout, "Could not process Matrix Market banner.\n"); else if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode)) fprintf(stdout, "Sorry, this application does not support Market Market type: [%s]\n", mm_typecode_to_str(matcode)); else if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0) fprintf(stdout, "Could not read the size of the matrix"); else { /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; // adjust from 1-based to 0-based J[i]--; } if (f != stdin) fclose(f); // We now have the matrix with I, J and val // We will generate our CSR matrix from them int* IRP = (int *)malloc((M + 1) * sizeof(int)); int* JA_CSR = (int *)malloc(nz * sizeof(int)); double* AS_CSR = (double *)malloc(nz * sizeof(double)); IRP = getCSR_IRP(M, nz, I); JA_CSR = getCSR_JA(nz, I, J); AS_CSR = getCSR_AS(nz, I, val); // We will generate our ELLPACK matrix int MAXNZ; MAXNZ = getELLPACK_MAXNZ(nz, I); int* JA_ELLPACK = (int *)malloc(M * MAXNZ * sizeof(int)); double* AS_ELLPACK = (double *)malloc(M * MAXNZ * sizeof(double)); JA_ELLPACK = getELLPACK_JA(M, nz, I, J, MAXNZ); AS_ELLPACK = getELLPACK_AS(M, nz, I, val, MAXNZ); // We initiate our matrices for the product double* x = (double*)malloc(sizeof(double)*N); double* y = (double*)malloc(sizeof(double)*M); // We generate randomly x of size N generateVector(N, x); // We create our CUDA matrices double *d_AS_CSR, *d_AS_ELLPACK, *d_x, *d_y; int *d_IRP, *d_JA_CSR, *d_JA_ELLPACK; checkCudaErrors(hipMalloc((void**)&d_AS_CSR, nz * sizeof(double))); checkCudaErrors(hipMalloc((void**)&d_AS_ELLPACK, M * MAXNZ * sizeof(double))); checkCudaErrors(hipMalloc((void**)&d_x, N * sizeof(double))); checkCudaErrors(hipMalloc((void**)&d_y, M * sizeof(double))); checkCudaErrors(hipMalloc((void**)&d_IRP, (M + 1) * sizeof(int))); checkCudaErrors(hipMalloc((void**)&d_JA_CSR, nz * sizeof(int))); checkCudaErrors(hipMalloc((void**)&d_JA_ELLPACK, M * MAXNZ * sizeof(int))); // Copy matrices from the host (CPU) to the device (GPU). checkCudaErrors(hipMemcpy(d_AS_CSR, AS_CSR, nz * sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_AS_ELLPACK, AS_ELLPACK, M * MAXNZ * sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_x, x, N * sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_IRP, IRP, (M + 1) * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_JA_CSR, JA_CSR, nz * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_JA_ELLPACK, JA_ELLPACK, M * MAXNZ * sizeof(int), hipMemcpyHostToDevice)); // Calculate the dimension of the grid of blocks (1D) necessary to cover all rows. const dim3 GRID_DIM((M - 1 + BLOCK_DIM.x) / BLOCK_DIM.x, 1); double flopcnt = 2.e-6*M*N; // Create the CUDA SDK timer. 
StopWatchInterface* timer = 0; sdkCreateTimer(&timer); // We do the product with CSR timer->start(); for (int i=0;i<10;i++) MatrixVectorCSR << <GRID_DIM, BLOCK_DIM >> > (M, d_IRP, d_JA_CSR, d_AS_CSR, d_x, d_y); checkCudaErrors(hipDeviceSynchronize()); timer->stop(); double gpuflops = 10 * flopcnt / timer->getTime(); // We print our results fprintf(stdout, "CSR: Matrix-Vector product of size %d x %d: time %lf GFLOPS %lf \n", M, N, (timer->getTime())/10, gpuflops); // We do the product for ELLPACK timer->reset(); timer->start(); for (int i = 0;i < 10;i++) MatrixVectorELLPACK << <GRID_DIM, BLOCK_DIM >> > (M, N, MAXNZ, d_JA_ELLPACK, d_AS_ELLPACK, d_x, d_y); checkCudaErrors(hipDeviceSynchronize()); timer->stop(); gpuflops = 10 * flopcnt / timer->getTime(); // We print our results fprintf(stdout, "ELLPACK: Matrix-Vector product of size %d x %d: time %lf GFLOPS %lf \n", M, N, (timer->getTime())/10, gpuflops); // We free the matrices and vectors delete timer; checkCudaErrors(hipFree(d_IRP)); checkCudaErrors(hipFree(d_x)); checkCudaErrors(hipFree(d_JA_CSR)); checkCudaErrors(hipFree(d_AS_CSR)); checkCudaErrors(hipFree(d_JA_ELLPACK)); checkCudaErrors(hipFree(d_AS_ELLPACK)); free(IRP); free(JA_CSR); free(AS_CSR); free(JA_ELLPACK); free(AS_ELLPACK); free(x); free(y); } return 0; }
11d170ed032b586d4a3c869cb50409140227d58b.cu
#include <stdlib.h> #include <stdio.h> #include <iostream> #include <cuda_runtime.h> // For CUDA runtime API #include <helper_cuda.h> // For checkCudaError macro #include <helper_timer.h> // For CUDA SDK timers #include "wtime.h" #include "mmio.h" #include "matrixPreprocessing.h" #include "test.h" #include "matrixVector.h" //Simple dimension: define a 1D block structure #define BD 256 const dim3 BLOCK_DIM(BD); // Generates a random vector void generateVector(int vectorSize, double* vector) { srand(12345); for (int row = 0; row < vectorSize; ++row) { vector[row] = 100.0f * ((double)rand()) / RAND_MAX; } } // Generates a random matrix void generateMatrix(int rows, int cols, double* matrix) { srand(21345); int idx; for (int row = 0; row < rows; ++row) { for (int col = 0; col < cols; ++col) { idx = row * cols + col; matrix[idx] = 100.0f * ((double)rand()) / RAND_MAX; } } } int main(int argc, char** argv) { testMatrixProcessing(); testMatrixVectorProduct(); char* fileName; if (argc < 2) { fprintf(stderr, "Usage: %s [martix-market-filename]\nWe will be using the cage4 matrix as default\n", argv[0]); //fileName = "D:\\Cranfield work\\Small Scale Parallel Programming\\matrices\\cage4.mtx"; fileName = "../matrices/cage4.mtx"; } else { fileName = argv[1]; } // We read the file of the matrix int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; int i, *I, *J; double *val; // If the file of the matrix cannot be opened if ((f = fopen(fileName, "r")) == NULL) fprintf(stdout, "The file %s could not be opened\n", fileName); else if (mm_read_banner(f, &matcode) != 0) fprintf(stdout, "Could not process Matrix Market banner.\n"); else if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode)) fprintf(stdout, "Sorry, this application does not support Market Market type: [%s]\n", mm_typecode_to_str(matcode)); else if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0) fprintf(stdout, "Could not read the size of the matrix"); else { /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; // adjust from 1-based to 0-based J[i]--; } if (f != stdin) fclose(f); // We now have the matrix with I, J and val // We will generate our CSR matrix from them int* IRP = (int *)malloc((M + 1) * sizeof(int)); int* JA_CSR = (int *)malloc(nz * sizeof(int)); double* AS_CSR = (double *)malloc(nz * sizeof(double)); IRP = getCSR_IRP(M, nz, I); JA_CSR = getCSR_JA(nz, I, J); AS_CSR = getCSR_AS(nz, I, val); // We will generate our ELLPACK matrix int MAXNZ; MAXNZ = getELLPACK_MAXNZ(nz, I); int* JA_ELLPACK = (int *)malloc(M * MAXNZ * sizeof(int)); double* AS_ELLPACK = (double *)malloc(M * MAXNZ * sizeof(double)); JA_ELLPACK = getELLPACK_JA(M, nz, I, J, MAXNZ); AS_ELLPACK = getELLPACK_AS(M, nz, I, val, MAXNZ); // We initiate our matrices for the product double* x = (double*)malloc(sizeof(double)*N); double* y = (double*)malloc(sizeof(double)*M); // We generate randomly x of size N generateVector(N, x); // We create our CUDA matrices double *d_AS_CSR, *d_AS_ELLPACK, *d_x, *d_y; int *d_IRP, *d_JA_CSR, *d_JA_ELLPACK; checkCudaErrors(cudaMalloc((void**)&d_AS_CSR, nz * sizeof(double))); checkCudaErrors(cudaMalloc((void**)&d_AS_ELLPACK, M * MAXNZ * sizeof(double))); checkCudaErrors(cudaMalloc((void**)&d_x, N * sizeof(double))); checkCudaErrors(cudaMalloc((void**)&d_y, M * sizeof(double))); checkCudaErrors(cudaMalloc((void**)&d_IRP, (M + 1) * sizeof(int))); checkCudaErrors(cudaMalloc((void**)&d_JA_CSR, nz * sizeof(int))); checkCudaErrors(cudaMalloc((void**)&d_JA_ELLPACK, M * MAXNZ * sizeof(int))); // Copy matrices from the host (CPU) to the device (GPU). checkCudaErrors(cudaMemcpy(d_AS_CSR, AS_CSR, nz * sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_AS_ELLPACK, AS_ELLPACK, M * MAXNZ * sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_x, x, N * sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_IRP, IRP, (M + 1) * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_JA_CSR, JA_CSR, nz * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_JA_ELLPACK, JA_ELLPACK, M * MAXNZ * sizeof(int), cudaMemcpyHostToDevice)); // Calculate the dimension of the grid of blocks (1D) necessary to cover all rows. const dim3 GRID_DIM((M - 1 + BLOCK_DIM.x) / BLOCK_DIM.x, 1); double flopcnt = 2.e-6*M*N; // Create the CUDA SDK timer. 
StopWatchInterface* timer = 0; sdkCreateTimer(&timer); // We do the product with CSR timer->start(); for (int i=0;i<10;i++) MatrixVectorCSR << <GRID_DIM, BLOCK_DIM >> > (M, d_IRP, d_JA_CSR, d_AS_CSR, d_x, d_y); checkCudaErrors(cudaDeviceSynchronize()); timer->stop(); double gpuflops = 10 * flopcnt / timer->getTime(); // We print our results fprintf(stdout, "CSR: Matrix-Vector product of size %d x %d: time %lf GFLOPS %lf \n", M, N, (timer->getTime())/10, gpuflops); // We do the product for ELLPACK timer->reset(); timer->start(); for (int i = 0;i < 10;i++) MatrixVectorELLPACK << <GRID_DIM, BLOCK_DIM >> > (M, N, MAXNZ, d_JA_ELLPACK, d_AS_ELLPACK, d_x, d_y); checkCudaErrors(cudaDeviceSynchronize()); timer->stop(); gpuflops = 10 * flopcnt / timer->getTime(); // We print our results fprintf(stdout, "ELLPACK: Matrix-Vector product of size %d x %d: time %lf GFLOPS %lf \n", M, N, (timer->getTime())/10, gpuflops); // We free the matrices and vectors delete timer; checkCudaErrors(cudaFree(d_IRP)); checkCudaErrors(cudaFree(d_x)); checkCudaErrors(cudaFree(d_JA_CSR)); checkCudaErrors(cudaFree(d_AS_CSR)); checkCudaErrors(cudaFree(d_JA_ELLPACK)); checkCudaErrors(cudaFree(d_AS_ELLPACK)); free(IRP); free(JA_CSR); free(AS_CSR); free(JA_ELLPACK); free(AS_ELLPACK); free(x); free(y); } return 0; }
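The MatrixVectorCSR and MatrixVectorELLPACK kernels invoked above are declared in matrixVector.h and implemented elsewhere, so they are not visible in this pair. Two hedged notes: first, flopcnt = 2.e-6*M*N counts the operations of a dense matrix-vector product, whereas a sparse product performs roughly 2*nz floating-point operations, so the reported GFLOPS figure should be read with that in mind; second, a typical scalar CSR kernel consistent with the call MatrixVectorCSR<<<GRID_DIM, BLOCK_DIM>>>(M, d_IRP, d_JA_CSR, d_AS_CSR, d_x, d_y) might look like the sketch below (an assumption about the interface, not the project's actual implementation).

// Sketch only: one thread per row, scalar CSR SpMV, matching the call site above.
// IRP has M+1 entries; row i's nonzeros occupy JA/AS positions [IRP[i], IRP[i+1]).
__global__ void MatrixVectorCSR_sketch(int M, const int *IRP, const int *JA,
                                       const double *AS, const double *x, double *y)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M) {
        double t = 0.0;
        for (int j = IRP[row]; j < IRP[row + 1]; ++j) {
            t += AS[j] * x[JA[j]];
        }
        y[row] = t;
    }
}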
853f8a9909b2f0695ddff35691cb2d27793b3d9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_concat_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; template <class T> __global__ void ConcatPartialCUDAKernel(T **in, T *out, int64_t all_length, int64_t in_batch_len, int64_t start_index, int64_t out_batch_len, int64_t part_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < all_length) { int64_t bs_id = id / out_batch_len; int64_t bs_index = id % out_batch_len; int64_t var_id = bs_index / part_length; int64_t part_index = bs_index % part_length; int64_t in_id = start_index + part_index; const T *tmp = in[var_id]; out[id] = tmp[bs_id * in_batch_len + in_id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void ConcatPartialGradCUDAKernel(T **in, const T *out, int64_t all_length, int64_t in_batch_len, int64_t start_index, int64_t out_batch_len, int64_t part_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < all_length) { int64_t bs_id = id / out_batch_len; int64_t bs_index = id % out_batch_len; int64_t var_id = bs_index / part_length; int64_t part_index = bs_index % part_length; int64_t in_id = start_index + part_index; T *tmp = in[var_id]; tmp[bs_id * in_batch_len + in_id] = out[id]; id += blockDim.x * gridDim.x; } } template <typename T> class PartialConcatOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<Tensor>("X"); Tensor *out = ctx.Output<Tensor>("Out"); PADDLE_ENFORCE_EQ(in_vars[0] != nullptr, true, platform::errors::InvalidArgument( "The input of partial concat should not be null.")); auto input_dim = in_vars[0]->dims(); PADDLE_ENFORCE_EQ(input_dim.size(), 2, platform::errors::InvalidArgument( "Only supports 2-D array with batch size in the 1st " "dimension and data in the 2nd.")); auto in_size = input_dim[1]; // may be negative auto start_index = ctx.Attr<int>("start_index"); start_index = ComputeStartIndex(start_index, in_size); auto partial_len = ctx.Attr<int>("length"); if (partial_len < 0) { partial_len = in_size - start_index; } int in_num = in_vars.size(); int batch_size = input_dim[0]; int out_batch_len = partial_len * in_num; int all_length = batch_size * out_batch_len; constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; int grids; int blocks; auto ComputeKernelParameter = 
[&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = CEIL_DIV(length, tile_size); blocks = tile_size; }; auto place = ctx.GetPlace(); T *out_data = out->mutable_data<T>(place); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) in_data.emplace_back(in_vars[i]->data<T>()); auto tmp_in_array = memory::Alloc( dev_ctx.GetPlace(), in_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(all_length); hipLaunchKernelGGL(( ConcatPartialCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), all_length, in_size, start_index, out_batch_len, partial_len); } }; template <typename T> class PartialConcatGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, platform::errors::InvalidArgument( "The input of partial concat should not be null.")); // all parameters auto batch_size = ins[0]->dims()[0]; auto in_size = ins[0]->dims()[1]; // may be negative auto start_index = ctx.Attr<int>("start_index"); start_index = ComputeStartIndex(start_index, in_size); auto partial_len = ctx.Attr<int>("length"); if (partial_len < 0) partial_len = in_size - start_index; auto in_num = ins.size(); auto grad_batch_len = partial_len * in_num; auto all_length = grad_batch_len * batch_size; // initialize auto &place = *ctx.template device_context<phi::GPUContext>().eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; int grids; int blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = CEIL_DIV(length, tile_size); blocks = tile_size; }; std::vector<const T *> out_data; for (size_t i = 0; i < in_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } auto tmp_out_array = memory::Alloc( dev_ctx.GetPlace(), out_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(all_length); hipLaunchKernelGGL(( ConcatPartialGradCUDAKernel<T>) , dim3(grids), dim3(blocks), 0, stream, out_grad_data, out_grad->data<T>(), all_length, in_size, start_index, grad_batch_len, partial_len); } }; } // 
namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_concat, ops::PartialConcatOpCUDAKernel<float>, ops::PartialConcatOpCUDAKernel<double>, ops::PartialConcatOpCUDAKernel<int>, ops::PartialConcatOpCUDAKernel<int64_t>, ops::PartialConcatOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_concat_grad, ops::PartialConcatGradOpCUDAKernel<float>, ops::PartialConcatGradOpCUDAKernel<double>, ops::PartialConcatGradOpCUDAKernel<int>, ops::PartialConcatGradOpCUDAKernel<int64_t>, ops::PartialConcatGradOpCUDAKernel<plat::float16>);
853f8a9909b2f0695ddff35691cb2d27793b3d9e.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_concat_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; template <class T> __global__ void ConcatPartialCUDAKernel(T **in, T *out, int64_t all_length, int64_t in_batch_len, int64_t start_index, int64_t out_batch_len, int64_t part_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < all_length) { int64_t bs_id = id / out_batch_len; int64_t bs_index = id % out_batch_len; int64_t var_id = bs_index / part_length; int64_t part_index = bs_index % part_length; int64_t in_id = start_index + part_index; const T *tmp = in[var_id]; out[id] = tmp[bs_id * in_batch_len + in_id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void ConcatPartialGradCUDAKernel(T **in, const T *out, int64_t all_length, int64_t in_batch_len, int64_t start_index, int64_t out_batch_len, int64_t part_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < all_length) { int64_t bs_id = id / out_batch_len; int64_t bs_index = id % out_batch_len; int64_t var_id = bs_index / part_length; int64_t part_index = bs_index % part_length; int64_t in_id = start_index + part_index; T *tmp = in[var_id]; tmp[bs_id * in_batch_len + in_id] = out[id]; id += blockDim.x * gridDim.x; } } template <typename T> class PartialConcatOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<Tensor>("X"); Tensor *out = ctx.Output<Tensor>("Out"); PADDLE_ENFORCE_EQ(in_vars[0] != nullptr, true, platform::errors::InvalidArgument( "The input of partial concat should not be null.")); auto input_dim = in_vars[0]->dims(); PADDLE_ENFORCE_EQ(input_dim.size(), 2, platform::errors::InvalidArgument( "Only supports 2-D array with batch size in the 1st " "dimension and data in the 2nd.")); auto in_size = input_dim[1]; // may be negative auto start_index = ctx.Attr<int>("start_index"); start_index = ComputeStartIndex(start_index, in_size); auto partial_len = ctx.Attr<int>("length"); if (partial_len < 0) { partial_len = in_size - start_index; } int in_num = in_vars.size(); int batch_size = input_dim[0]; int out_batch_len = partial_len * in_num; int all_length = batch_size * out_batch_len; constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; int grids; int blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < 
max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = CEIL_DIV(length, tile_size); blocks = tile_size; }; auto place = ctx.GetPlace(); T *out_data = out->mutable_data<T>(place); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) in_data.emplace_back(in_vars[i]->data<T>()); auto tmp_in_array = memory::Alloc( dev_ctx.GetPlace(), in_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(all_length); ConcatPartialCUDAKernel<T><<<grids, blocks, 0, stream>>>(in_array_data, out->data<T>(), all_length, in_size, start_index, out_batch_len, partial_len); } }; template <typename T> class PartialConcatGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, platform::errors::InvalidArgument( "The input of partial concat should not be null.")); // all parameters auto batch_size = ins[0]->dims()[0]; auto in_size = ins[0]->dims()[1]; // may be negative auto start_index = ctx.Attr<int>("start_index"); start_index = ComputeStartIndex(start_index, in_size); auto partial_len = ctx.Attr<int>("length"); if (partial_len < 0) partial_len = in_size - start_index; auto in_num = ins.size(); auto grad_batch_len = partial_len * in_num; auto all_length = grad_batch_len * batch_size; // initialize auto &place = *ctx.template device_context<phi::GPUContext>().eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; int grids; int blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = CEIL_DIV(length, tile_size); blocks = tile_size; }; std::vector<const T *> out_data; for (size_t i = 0; i < in_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } auto tmp_out_array = memory::Alloc( dev_ctx.GetPlace(), out_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(all_length); ConcatPartialGradCUDAKernel<T> <<<grids, blocks, 0, stream>>>(out_grad_data, out_grad->data<T>(), all_length, in_size, start_index, grad_batch_len, partial_len); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_concat, 
ops::PartialConcatOpCUDAKernel<float>, ops::PartialConcatOpCUDAKernel<double>, ops::PartialConcatOpCUDAKernel<int>, ops::PartialConcatOpCUDAKernel<int64_t>, ops::PartialConcatOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_concat_grad, ops::PartialConcatGradOpCUDAKernel<float>, ops::PartialConcatGradOpCUDAKernel<double>, ops::PartialConcatGradOpCUDAKernel<int>, ops::PartialConcatGradOpCUDAKernel<int64_t>, ops::PartialConcatGradOpCUDAKernel<plat::float16>);
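The forward kernel flattens the output into batch_size rows of out_batch_len = part_length * in_num elements, and for each flat index recovers which input tensor (var_id) and which column (start_index + part_index) it reads from. A hedged host-side reference that mirrors exactly the same index arithmetic (useful for unit-testing the kernel; not part of PaddlePaddle itself) is sketched below.

// Sketch only: CPU reference for ConcatPartialCUDAKernel's indexing, for testing.
#include <cstdint>
#include <vector>

template <class T>
void partial_concat_reference(const std::vector<const T*>& ins, T* out,
                              int64_t batch_size, int64_t in_batch_len,
                              int64_t start_index, int64_t part_length)
{
    const int64_t in_num = static_cast<int64_t>(ins.size());
    const int64_t out_batch_len = part_length * in_num;
    for (int64_t id = 0; id < batch_size * out_batch_len; ++id) {
        const int64_t bs_id = id / out_batch_len;          // which row of the batch
        const int64_t bs_index = id % out_batch_len;       // position inside the output row
        const int64_t var_id = bs_index / part_length;     // which input tensor
        const int64_t part_index = bs_index % part_length; // column within the slice
        out[id] = ins[var_id][bs_id * in_batch_len + start_index + part_index];
    }
}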
0b0971f078f1b6ab93e30fb8df15a7d13d1f5e37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "BitPackGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "Check.h" #include "CudaUtils.h" #include "DeltaGPU.h" #include "RunLengthEncodeGPU.h" #include "TempSpaceBroker.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <limits> #include <memory> #include <sstream> #include <stdexcept> #include <string> namespace nvcomp { /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename T> __global__ void dereferenceDevice(T* const outValue, T* const* const ref) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *outValue = **ref; } template <typename T> __global__ void configureBitPackHeader( CascadedMetadata::Header* const header, T** const minValueDevicePtr, unsigned char** const numBitsDevicePtr) { // setup the header and pointers into it assert(blockIdx.x == 0); assert(threadIdx.x == 0); *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header); *numBitsDevicePtr = &header->numBits; } /** * @brief Asynchronously perform a device to device copy, where the destination * address and number of elements to copy are stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block. * @param destDPtr The pointer to the destination address to copy elements to, * stored on the device. * @param src The source address to copy elements from. * @param numElementsDPtr The number of elements to copy, stored on the device. 
*/ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T** const destDPtr, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); T* const dest = *destDPtr; const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } /** * @brief Asynchronously perform a device to device copy, where the number of * elements to copy is stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block to use. * @param dest The destination address to copy to. * @param src The source address to copy from. * @param numElementsDPtr The number of elements to copy, stored on the device. */ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T* const dest, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } template <typename T> __global__ void offsetPointerAsync(T* const src, T** const dst, const size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *dst = src + *offset; } __global__ void offsetAndAlignPointerAsync( void* const src, void** const dst, size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); // update the offset if we need to const size_t unalignedOffset = *offset; const size_t alignedOffset = roundUpTo(unalignedOffset, sizeof(size_t)); if (alignedOffset != unalignedOffset) { *offset = alignedOffset; } *dst = static_cast<char*>(src) + alignedOffset; } template <typename VALUE, typename RUN> __global__ void configTempSpacePointers( VALUE* const vals, VALUE** const valsPtr, RUN* const runs, RUN** const runsPtr, VALUE* const delta, VALUE** const deltaPtr) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *valsPtr = vals; *runsPtr = runs; *deltaPtr = delta; } template <typename T> __global__ void increaseOffsetByBitPacking( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = roundUpTo( roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T)); *offsetDevice += temp_size; } template <typename T> __global__ void increaseOffsetByRaw( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = header->length * sizeof(T); *offsetDevice += temp_size; } /** * @brief This kernel allows copying to the device from a stack variable * asynchronously. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. 
*/ template <typename T> __global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue) { static_assert(std::is_pod<T>::value, "Must be a POD to do async copy."); assert(threadIdx.x == 0); assert(blockIdx.x == 0); *deviceValue = hostValue; } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { void checkAlignmentOf(void* const ptr, const size_t alignment) { void* aligned_ptr = ptr; size_t space = alignment; if (std::align(alignment, alignment, aligned_ptr, space) == nullptr || ptr != aligned_ptr) { std::ostringstream oss; oss << ptr; throw std::runtime_error( "Incorrectly aligned buffer: " + oss.str() + ", should be aligned to " + std::to_string(alignment)); } } /** * @brief This copies the input to the device from a stack variable * asynchronously. While this is inefficient, it is better than synchronizing or * pinning the variable. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. */ template <typename T> void asyncPODCopy(const T& value, T* const destination, hipStream_t stream) { hipLaunchKernelGGL(( asyncPODCopyKernel), dim3(dim3(1)), dim3(dim3(1)), 0, stream, value, destination); hipError_t err = hipGetLastError(); if (err != hipSuccess) { throw std::runtime_error( "Failed to launch asyncPODCopyKernel " "kernel: " + std::to_string(err)); } } /** * @brief Bit pack or copy the elements to an output address. * * @tparam T The type of element to pack/copy. * @param headerDPtr The header, stored on the device. * @param temp_ptr The temporary workspace allocated (on the device). * @param temp_bytes The size of the temporary workspace. * @param outputDPtr The pointer to the location to output the elements to (on * the device), stored on the device. * @param input The input elements (on the device). * @param numElementsDPtr The pointer to the number of elements, stored on the * device. * @param maxNum The maximum number of elements. * @param offsetDPtr The current offset output, to be increased by * the number of bytes written by this function. * @param bitPacking Whether or not to perform bitpacking on this data. * @param stream The stream to asynchronously perform work on. 
*/ template <typename T> void packToOutput( CascadedMetadata::Header* const headerDPtr, void* const temp_ptr, const size_t temp_bytes, void** const outputDPtr, const T* const input, const size_t* const numElementsDPtr, const size_t maxNum, size_t* const offsetDPtr, const bool bitPacking, hipStream_t stream) { CudaUtils::copy_async( &(headerDPtr->length), numElementsDPtr, 1, DEVICE_TO_DEVICE, stream); if (bitPacking) { TempSpaceBroker tempSpace(temp_ptr, temp_bytes); void** bitPackOutputPtr; void** minValueDevicePtr; unsigned char** numBitsDevicePtr; tempSpace.reserve(&bitPackOutputPtr, 1); tempSpace.reserve(&minValueDevicePtr, 1); tempSpace.reserve(&numBitsDevicePtr, 1); hipLaunchKernelGGL(( configureBitPackHeader), dim3(1), dim3(1), 0, stream, headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr); void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1); const size_t packTempSize = temp_bytes - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr)); BitPackGPU::compress( packTemp, packTempSize, getnvcompType<T>(), outputDPtr, input, numElementsDPtr, maxNum, minValueDevicePtr, numBitsDevicePtr, stream); hipLaunchKernelGGL(( increaseOffsetByBitPacking<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr); } else { constexpr const int BLOCK_SIZE = 512; const dim3 grid(::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE))); const dim3 block(BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<T, BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr); hipLaunchKernelGGL(( increaseOffsetByRaw<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr); } } template <typename valT, typename runT> void generateTypedOutputUpperBound( const void* const /*in_ptr*/, const size_t in_bytes, const nvcompCascadedFormatOpts* const opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes) { if (temp_bytes > 0) { CHECK_NOT_NULL(temp_ptr); // only check if its non-null checkAlignmentOf(temp_ptr, sizeof(size_t)); } CascadedMetadata metadata(*opts, getnvcompType<valT>(), in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t outputSize = in_bytes / sizeof(valT); assert(outputSize * sizeof(valT) == in_bytes); int vals_id = 0; // initialize config nvcompType_t type = getnvcompType<valT>(); nvcompIntConfig_t* config = createConfig(&metadata); // First past - set layers assume nothing actual compresses. // TODO: This will be a // gross over estimation of the output size, but the better option would // be to probably just assume 1:1 output/input, and error out during // compression if we fail to achieve that (maybe just set RLE, Delta, and BP // to 0, and do a memcpy, so that user's wont have to handle the error case // in their code). 
// A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = ::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; nvcompConfigAddRLE_BP( config, inputId, outputSize, valId, type, bitPacking, runId, type, bitPacking); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, valId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, valId, type, 0); // no bitpacking when delta is used as an intermediate step } } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, inputId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, inputId, type, 0); // no bitpacking when delta is used as an intermediate step } } } destroyConfig(config); // we will abort compression if we can't fit into out_bytes. const size_t serializedMetadataSize = CascadedMetadataOnGPU::getSerializedSizeOf(metadata); // This may be overkill, as most datatypes we use are aligned to size_t, // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a // smart compiler could potentially generate instructions for some of our // structure that at 16-byte aligned. const size_t wordSize = alignof(std::max_align_t); // space for metadata, each set of 'runs', one set of 'vals'. *out_bytes = roundUpTo(serializedMetadataSize, wordSize) + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs + roundUpTo(sizeof(valT) * outputSize, wordSize); } template <typename valT, typename runT> void compressTypedAsync( const void* const in_ptr, const size_t in_bytes, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, hipStream_t stream) { const nvcompType_t type = getnvcompType<valT>(); CascadedMetadata metadata(*format_opts, type, in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t maxNum = in_bytes / sizeof(valT); int vals_id = 0; TempSpaceBroker tempSpace(temp_ptr, temp_bytes); size_t* offsetDevice; tempSpace.reserve(&offsetDevice, 1); CascadedMetadataOnGPU metadataOnGPU(out_ptr, *out_bytes); metadataOnGPU.copyToGPU(metadata, offsetDevice, stream); valT* vals_delta = nullptr; valT* vals_output = nullptr; runT* runs_output = nullptr; if (numRLEs > 0 || numDeltas > 0) { tempSpace.reserve(&vals_output, maxNum); if (numRLEs > 0) { tempSpace.reserve(&runs_output, maxNum); } tempSpace.reserve(&vals_delta, maxNum); } size_t* numRunsDevice; size_t* outputSizePtr; tempSpace.reserve(&numRunsDevice, 1); tempSpace.reserve(&outputSizePtr, 1); runT** runs_output_ptr; valT** vals_output_ptr; valT** vals_delta_ptr; tempSpace.reserve(&runs_output_ptr, 1); tempSpace.reserve(&vals_output_ptr, 1); tempSpace.reserve(&vals_delta_ptr, 1); void** bit_out_ptr; tempSpace.reserve(&bit_out_ptr, 1); hipError_t* statusDevice; tempSpace.reserve(&statusDevice, 1); hipLaunchKernelGGL(( configTempSpacePointers), dim3(1), dim3(1), 0, stream, vals_output, vals_output_ptr, runs_output, runs_output_ptr, vals_delta, vals_delta_ptr); // Set first offset 
to end of metadata metadataOnGPU.saveOffset(vals_id, offsetDevice, stream); // Second pass - perform compression and store in the memory allocated above. // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = ::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { int nextValId; const bool firstLayer = r == ::max(numRLEs - 1, numDeltas - 1); const valT* const vals_input = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // rle always first if (firstLayer) { RunLengthEncodeGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), vals_output, getnvcompType<runT>(), runs_output, numRunsDevice, vals_input, maxNum, stream); } else { RunLengthEncodeGPU::compressDownstream( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_output_ptr, getnvcompType<runT>(), (void**)runs_output_ptr, numRunsDevice, vals_input, outputSizePtr, maxNum, stream); } // save initial offset CascadedMetadata::Header* const valHdr = metadataOnGPU.getHeaderLocation(valId); CudaUtils::copy_async( &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(valId, offsetDevice, stream); CascadedMetadata::Header* const runHdr = metadataOnGPU.getHeaderLocation(runId); CudaUtils::copy_async( &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_delta_ptr, vals_output, numRunsDevice, maxNum, stream); const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } else { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, vals_delta, vals_output, numRunsDevice); nextValId = valId; } hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(runId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(runId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, runs_output, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { if (!firstLayer) { CudaUtils::copy_async( numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async( numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); } // No RLE DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_output_ptr, vals_input, numRunsDevice, maxNum, stream); // we need to copy the delta to final delta buffer { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, vals_delta, vals_output, numRunsDevice); } const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), 
numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } if (r == 0) { hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_delta, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { // update current RLE size CudaUtils::copy_async( outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream); } } // If there are no RLEs or Deltas, we will do a single BP step. if (numRLEs == 0 && numDeltas == 0) { const int nextValId = ++vals_id; const valT* const vals_input = static_cast<const valT*>(in_ptr); CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_input, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } // async copy output metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream); CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream); } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void nvcompCascadedCompressionGPU::computeWorkspaceSize( const void* /*in_ptr*/, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const temp_bytes) { size_t kernelBytes = 0; // get at least enough for intermediate gpu values size_t ioBytes = 1024; const size_t numIn = in_bytes / sizeOfnvcompType(in_type); const nvcompType_t runType = selectRunsType(numIn); if (opts->use_bp) { // max of runs and values kernelBytes = ::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type)); kernelBytes = ::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType)); } if (opts->num_deltas > 0) { kernelBytes = ::max( kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type)); } if (opts->num_RLEs > 0) { kernelBytes = ::max( kernelBytes, RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType)); ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType); } else if (opts->num_deltas > 0) { ioBytes += 2 * in_bytes; } *temp_bytes = kernelBytes + ioBytes; } void nvcompCascadedCompressionGPU::generateOutputUpperBound( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(opts); if (temp_bytes > 0) { CHECK_NOT_NULL(temp_ptr); } CHECK_NOT_NULL(out_bytes); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, generateTypedOutputUpperBound, in_ptr, in_bytes, opts, temp_ptr, temp_bytes, out_bytes); } void nvcompCascadedCompressionGPU::compressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const cascadedOpts, void* const temp_ptr, const size_t temp_bytes, 
void* const out_ptr, size_t* const out_bytes, hipStream_t stream) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(cascadedOpts); CHECK_NOT_NULL(temp_ptr); CHECK_NOT_NULL(out_ptr); CHECK_NOT_NULL(out_bytes); checkAlignmentOf(out_ptr, sizeof(size_t)); checkAlignmentOf(temp_ptr, sizeof(size_t)); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, compressTypedAsync, in_ptr, in_bytes, cascadedOpts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } } // namespace nvcomp
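// --- Illustrative standalone sketch (not taken from nvcomp) ---------------
// The compression path above repeatedly rounds offsets up to size_t alignment
// (offsetAndAlignPointerAsync) and rounds bit counts up to whole bytes and
// elements (increaseOffsetByBitPacking). The tiny host program below only
// demonstrates that round-up arithmetic; the roundUpDiv/roundUpTo bodies are
// assumptions written for this illustration, not the library's definitions.
#include <cassert>
#include <cstddef>
#include <cstdio>

static size_t roundUpDiv(size_t num, size_t unit)
{
  // number of whole units needed to cover num elements
  return (num + unit - 1) / unit;
}

static size_t roundUpTo(size_t num, size_t unit)
{
  // smallest multiple of unit that is >= num
  return roundUpDiv(num, unit) * unit;
}

int main()
{
  // An output offset of 37 bytes is not size_t-aligned (8 bytes on x86_64),
  // so the next chunk would start at 40, mirroring the kernel's adjustment.
  const size_t unaligned = 37;
  const size_t aligned = roundUpTo(unaligned, sizeof(size_t));
  assert(aligned % sizeof(size_t) == 0 && aligned >= unaligned);
  std::printf("offset %zu -> aligned offset %zu\n", unaligned, aligned);

  // Bit-packed size of 1000 five-bit values, rounded up to whole bytes and
  // then to a whole 4-byte element, as in increaseOffsetByBitPacking<T>.
  const size_t packedBytes
      = roundUpTo(roundUpDiv(1000 * 5, 8), sizeof(unsigned int));
  std::printf("1000 x 5-bit values pack into %zu bytes\n", packedBytes);
  return 0;
}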
0b0971f078f1b6ab93e30fb8df15a7d13d1f5e37.cu
/* * Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "BitPackGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "Check.h" #include "CudaUtils.h" #include "DeltaGPU.h" #include "RunLengthEncodeGPU.h" #include "TempSpaceBroker.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <limits> #include <memory> #include <sstream> #include <stdexcept> #include <string> namespace nvcomp { /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename T> __global__ void dereferenceDevice(T* const outValue, T* const* const ref) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *outValue = **ref; } template <typename T> __global__ void configureBitPackHeader( CascadedMetadata::Header* const header, T** const minValueDevicePtr, unsigned char** const numBitsDevicePtr) { // setup the header and pointers into it assert(blockIdx.x == 0); assert(threadIdx.x == 0); *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header); *numBitsDevicePtr = &header->numBits; } /** * @brief Asynchronously perform a device to device copy, where the destination * address and number of elements to copy are stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block. * @param destDPtr The pointer to the destination address to copy elements to, * stored on the device. * @param src The source address to copy elements from. * @param numElementsDPtr The number of elements to copy, stored on the device. 
*/ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T** const destDPtr, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); T* const dest = *destDPtr; const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } /** * @brief Asynchronously perform a device to device copy, where the number of * elements to copy is stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block to use. * @param dest The destination address to copy to. * @param src The source address to copy from. * @param numElementsDPtr The number of elements to copy, stored on the device. */ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T* const dest, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } template <typename T> __global__ void offsetPointerAsync(T* const src, T** const dst, const size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *dst = src + *offset; } __global__ void offsetAndAlignPointerAsync( void* const src, void** const dst, size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); // update the offset if we need to const size_t unalignedOffset = *offset; const size_t alignedOffset = roundUpTo(unalignedOffset, sizeof(size_t)); if (alignedOffset != unalignedOffset) { *offset = alignedOffset; } *dst = static_cast<char*>(src) + alignedOffset; } template <typename VALUE, typename RUN> __global__ void configTempSpacePointers( VALUE* const vals, VALUE** const valsPtr, RUN* const runs, RUN** const runsPtr, VALUE* const delta, VALUE** const deltaPtr) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *valsPtr = vals; *runsPtr = runs; *deltaPtr = delta; } template <typename T> __global__ void increaseOffsetByBitPacking( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = roundUpTo( roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T)); *offsetDevice += temp_size; } template <typename T> __global__ void increaseOffsetByRaw( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = header->length * sizeof(T); *offsetDevice += temp_size; } /** * @brief This kernel allows copying to the device from a stack variable * asynchronously. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. 
*/ template <typename T> __global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue) { static_assert(std::is_pod<T>::value, "Must be a POD to do async copy."); assert(threadIdx.x == 0); assert(blockIdx.x == 0); *deviceValue = hostValue; } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { void checkAlignmentOf(void* const ptr, const size_t alignment) { void* aligned_ptr = ptr; size_t space = alignment; if (std::align(alignment, alignment, aligned_ptr, space) == nullptr || ptr != aligned_ptr) { std::ostringstream oss; oss << ptr; throw std::runtime_error( "Incorrectly aligned buffer: " + oss.str() + ", should be aligned to " + std::to_string(alignment)); } } /** * @brief This copies the input to the device from a stack variable * asynchronously. While this is inefficient, it is better than synchronizing or * pinning the variable. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. */ template <typename T> void asyncPODCopy(const T& value, T* const destination, cudaStream_t stream) { asyncPODCopyKernel<<<dim3(1), dim3(1), 0, stream>>>(value, destination); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { throw std::runtime_error( "Failed to launch asyncPODCopyKernel " "kernel: " + std::to_string(err)); } } /** * @brief Bit pack or copy the elements to an output address. * * @tparam T The type of element to pack/copy. * @param headerDPtr The header, stored on the device. * @param temp_ptr The temporary workspace allocated (on the device). * @param temp_bytes The size of the temporary workspace. * @param outputDPtr The pointer to the location to output the elements to (on * the device), stored on the device. * @param input The input elements (on the device). * @param numElementsDPtr The pointer to the number of elements, stored on the * device. * @param maxNum The maximum number of elements. * @param offsetDPtr The current offset output, to be increased by * the number of bytes written by this function. * @param bitPacking Whether or not to perform bitpacking on this data. * @param stream The stream to asynchronously perform work on. 
*/ template <typename T> void packToOutput( CascadedMetadata::Header* const headerDPtr, void* const temp_ptr, const size_t temp_bytes, void** const outputDPtr, const T* const input, const size_t* const numElementsDPtr, const size_t maxNum, size_t* const offsetDPtr, const bool bitPacking, cudaStream_t stream) { CudaUtils::copy_async( &(headerDPtr->length), numElementsDPtr, 1, DEVICE_TO_DEVICE, stream); if (bitPacking) { TempSpaceBroker tempSpace(temp_ptr, temp_bytes); void** bitPackOutputPtr; void** minValueDevicePtr; unsigned char** numBitsDevicePtr; tempSpace.reserve(&bitPackOutputPtr, 1); tempSpace.reserve(&minValueDevicePtr, 1); tempSpace.reserve(&numBitsDevicePtr, 1); configureBitPackHeader<<<1, 1, 0, stream>>>( headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr); void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1); const size_t packTempSize = temp_bytes - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr)); BitPackGPU::compress( packTemp, packTempSize, getnvcompType<T>(), outputDPtr, input, numElementsDPtr, maxNum, minValueDevicePtr, numBitsDevicePtr, stream); increaseOffsetByBitPacking<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr); } else { constexpr const int BLOCK_SIZE = 512; const dim3 grid(std::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE))); const dim3 block(BLOCK_SIZE); deferredCopy<T, BLOCK_SIZE><<<grid, block, 0, stream>>>( reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr); increaseOffsetByRaw<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr); } } template <typename valT, typename runT> void generateTypedOutputUpperBound( const void* const /*in_ptr*/, const size_t in_bytes, const nvcompCascadedFormatOpts* const opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes) { if (temp_bytes > 0) { CHECK_NOT_NULL(temp_ptr); // only check if its non-null checkAlignmentOf(temp_ptr, sizeof(size_t)); } CascadedMetadata metadata(*opts, getnvcompType<valT>(), in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t outputSize = in_bytes / sizeof(valT); assert(outputSize * sizeof(valT) == in_bytes); int vals_id = 0; // initialize config nvcompType_t type = getnvcompType<valT>(); nvcompIntConfig_t* config = createConfig(&metadata); // First past - set layers assume nothing actual compresses. // TODO: This will be a // gross over estimation of the output size, but the better option would // be to probably just assume 1:1 output/input, and error out during // compression if we fail to achieve that (maybe just set RLE, Delta, and BP // to 0, and do a memcpy, so that user's wont have to handle the error case // in their code). 
// A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; nvcompConfigAddRLE_BP( config, inputId, outputSize, valId, type, bitPacking, runId, type, bitPacking); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, valId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, valId, type, 0); // no bitpacking when delta is used as an intermediate step } } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, inputId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, inputId, type, 0); // no bitpacking when delta is used as an intermediate step } } } destroyConfig(config); // we will abort compression if we can't fit into out_bytes. const size_t serializedMetadataSize = CascadedMetadataOnGPU::getSerializedSizeOf(metadata); // This may be overkill, as most datatypes we use are aligned to size_t, // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a // smart compiler could potentially generate instructions for some of our // structure that at 16-byte aligned. const size_t wordSize = alignof(std::max_align_t); // space for metadata, each set of 'runs', one set of 'vals'. *out_bytes = roundUpTo(serializedMetadataSize, wordSize) + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs + roundUpTo(sizeof(valT) * outputSize, wordSize); } template <typename valT, typename runT> void compressTypedAsync( const void* const in_ptr, const size_t in_bytes, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { const nvcompType_t type = getnvcompType<valT>(); CascadedMetadata metadata(*format_opts, type, in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t maxNum = in_bytes / sizeof(valT); int vals_id = 0; TempSpaceBroker tempSpace(temp_ptr, temp_bytes); size_t* offsetDevice; tempSpace.reserve(&offsetDevice, 1); CascadedMetadataOnGPU metadataOnGPU(out_ptr, *out_bytes); metadataOnGPU.copyToGPU(metadata, offsetDevice, stream); valT* vals_delta = nullptr; valT* vals_output = nullptr; runT* runs_output = nullptr; if (numRLEs > 0 || numDeltas > 0) { tempSpace.reserve(&vals_output, maxNum); if (numRLEs > 0) { tempSpace.reserve(&runs_output, maxNum); } tempSpace.reserve(&vals_delta, maxNum); } size_t* numRunsDevice; size_t* outputSizePtr; tempSpace.reserve(&numRunsDevice, 1); tempSpace.reserve(&outputSizePtr, 1); runT** runs_output_ptr; valT** vals_output_ptr; valT** vals_delta_ptr; tempSpace.reserve(&runs_output_ptr, 1); tempSpace.reserve(&vals_output_ptr, 1); tempSpace.reserve(&vals_delta_ptr, 1); void** bit_out_ptr; tempSpace.reserve(&bit_out_ptr, 1); cudaError_t* statusDevice; tempSpace.reserve(&statusDevice, 1); configTempSpacePointers<<<1, 1, 0, stream>>>( vals_output, vals_output_ptr, runs_output, runs_output_ptr, vals_delta, vals_delta_ptr); // Set first offset to end of metadata 
metadataOnGPU.saveOffset(vals_id, offsetDevice, stream); // Second pass - perform compression and store in the memory allocated above. // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { int nextValId; const bool firstLayer = r == std::max(numRLEs - 1, numDeltas - 1); const valT* const vals_input = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // rle always first if (firstLayer) { RunLengthEncodeGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), vals_output, getnvcompType<runT>(), runs_output, numRunsDevice, vals_input, maxNum, stream); } else { RunLengthEncodeGPU::compressDownstream( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_output_ptr, getnvcompType<runT>(), (void**)runs_output_ptr, numRunsDevice, vals_input, outputSizePtr, maxNum, stream); } // save initial offset CascadedMetadata::Header* const valHdr = metadataOnGPU.getHeaderLocation(valId); CudaUtils::copy_async( &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(valId, offsetDevice, stream); CascadedMetadata::Header* const runHdr = metadataOnGPU.getHeaderLocation(runId); CudaUtils::copy_async( &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_delta_ptr, vals_output, numRunsDevice, maxNum, stream); const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } else { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(std::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>( vals_delta, vals_output, numRunsDevice); nextValId = valId; } offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(runId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(runId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, runs_output, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { if (!firstLayer) { CudaUtils::copy_async( numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async( numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); } // No RLE DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), getnvcompType<valT>(), (void**)vals_output_ptr, vals_input, numRunsDevice, maxNum, stream); // we need to copy the delta to final delta buffer { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(std::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>( vals_delta, vals_output, numRunsDevice); } const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); 
} if (r == 0) { offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_delta, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { // update current RLE size CudaUtils::copy_async( outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream); } } // If there are no RLEs or Deltas, we will do a single BP step. if (numRLEs == 0 && numDeltas == 0) { const int nextValId = ++vals_id; const valT* const vals_input = static_cast<const valT*>(in_ptr); CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_input, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } // async copy output metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream); CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream); } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void nvcompCascadedCompressionGPU::computeWorkspaceSize( const void* /*in_ptr*/, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const temp_bytes) { size_t kernelBytes = 0; // get at least enough for intermediate gpu values size_t ioBytes = 1024; const size_t numIn = in_bytes / sizeOfnvcompType(in_type); const nvcompType_t runType = selectRunsType(numIn); if (opts->use_bp) { // max of runs and values kernelBytes = std::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type)); kernelBytes = std::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType)); } if (opts->num_deltas > 0) { kernelBytes = std::max( kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type)); } if (opts->num_RLEs > 0) { kernelBytes = std::max( kernelBytes, RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType)); ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType); } else if (opts->num_deltas > 0) { ioBytes += 2 * in_bytes; } *temp_bytes = kernelBytes + ioBytes; } void nvcompCascadedCompressionGPU::generateOutputUpperBound( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(opts); if (temp_bytes > 0) { CHECK_NOT_NULL(temp_ptr); } CHECK_NOT_NULL(out_bytes); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, generateTypedOutputUpperBound, in_ptr, in_bytes, opts, temp_ptr, temp_bytes, out_bytes); } void nvcompCascadedCompressionGPU::compressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const cascadedOpts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(cascadedOpts); 
CHECK_NOT_NULL(temp_ptr); CHECK_NOT_NULL(out_ptr); CHECK_NOT_NULL(out_bytes); checkAlignmentOf(out_ptr, sizeof(size_t)); checkAlignmentOf(temp_ptr, sizeof(size_t)); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, compressTypedAsync, in_ptr, in_bytes, cascadedOpts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } } // namespace nvcomp
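// --- Illustrative standalone sketch (not part of the nvcomp API) ----------
// One cascaded layer above combines run-length encoding, a delta pass over
// the RLE values, and optional bit packing. The host program below walks a
// toy array through those three ideas serially, under simplified
// assumptions, to show why the cascade shrinks the data; it is not the GPU
// code path and does not reproduce the library's exact packing format.
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<uint32_t> in = {5, 5, 5, 7, 7, 9, 9, 9, 9, 12};

  // Run-length encode: vals[i] repeated runs[i] times.
  std::vector<uint32_t> vals, runs;
  for (size_t i = 0; i < in.size(); ++i) {
    if (vals.empty() || in[i] != vals.back()) {
      vals.push_back(in[i]);
      runs.push_back(1);
    } else {
      ++runs.back();
    }
  }

  // Delta-encode the RLE values (first element kept as-is).
  std::vector<int64_t> deltas(vals.size());
  for (size_t i = 0; i < vals.size(); ++i)
    deltas[i] = (i == 0) ? int64_t(vals[i])
                         : int64_t(vals[i]) - int64_t(vals[i - 1]);

  // Bit packing would store each delta with just enough bits for the
  // min..max range; here we only report that width.
  int64_t lo = deltas[0], hi = deltas[0];
  for (int64_t d : deltas) { lo = d < lo ? d : lo; hi = d > hi ? d : hi; }
  unsigned bits = 0;
  for (uint64_t range = uint64_t(hi - lo); range != 0; range >>= 1) ++bits;

  std::printf("%zu inputs -> %zu RLE pairs, deltas need %u bit(s) each\n",
              in.size(), vals.size(), bits ? bits : 1u);
  return 0;
}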
ac429924fe76dc7a0bd938c3ca110fd36b15700c.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "../ConstantTadHelper.h" #include <TAD.h> #include <ConstantHelper.h> #include <exceptions/cuda_exception.h> #include <execution/LaunchContext.h> #include <ShapeUtils.h> namespace nd4j { ConstantTadHelper::ConstantTadHelper() { auto numDevices = ConstantHelper::getNumberOfDevices(); for (int e = 0; e < numDevices; e++) { std::map<TadDescriptor, TadPack> pack; _cache.emplace_back(pack); } } ConstantTadHelper* ConstantTadHelper::getInstance() { if (!_INSTANCE) _INSTANCE = new ConstantTadHelper(); return _INSTANCE; } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, int dimension, const bool keepUnitiesInShape) { return tadForDimensions(originalShape, &dimension, 1, keepUnitiesInShape); } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, const std::vector<int> &dimensions, const bool keepUnitiesInShape) { return tadForDimensions(originalShape, const_cast<int *>(dimensions.data()), dimensions.size(), keepUnitiesInShape); } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, int* dimensions, int dimLength, const bool keepUnitiesInShape) { TadDescriptor tadDescriptor(originalShape, dimensions, dimLength, keepUnitiesInShape); return tadForDimensions(tadDescriptor); } TadPack& ConstantTadHelper::tadForDimensions(ShapeDescriptor &descriptor, std::vector<int> &dimensions, const bool keepUnitiesInShape) { TadDescriptor tadDescriptor(descriptor, dimensions, keepUnitiesInShape); return tadForDimensions(tadDescriptor); } TadPack& ConstantTadHelper::tadForDimensions(TadDescriptor &descriptor) { const int deviceId = ConstantHelper::getCurrentDevice(); _mutex.lock(); if (_cache[deviceId].count(descriptor) == 0) { const auto shapeInfo = descriptor.originalShape().toShapeInfo(); const int rank = shape::rank(shapeInfo); const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, descriptor.axis()); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(shapeInfo, dimsToExclude); const int subArrRank = (rank == dimsToExclude.size() || descriptor.areUnitiesinShape()) ? 
rank : rank - dimsToExclude.size(); auto sPtr = new Nd4jLong[shape::shapeInfoLength(subArrRank)]; auto oPtr = new Nd4jLong[numOfSubArrs]; if (numOfSubArrs > 0) shape::calcSubArrShapeAndOffsets(shapeInfo, numOfSubArrs, dimsToExclude.size(), dimsToExclude.data(), sPtr, oPtr, descriptor.areUnitiesinShape()); Nd4jPointer soPtr; auto res = hipMalloc(reinterpret_cast<void**>(&soPtr), numOfSubArrs * sizeof(Nd4jLong)); if (res != 0) throw cuda_exception::build("Memory allocation for tadOffsets failed", res); res = hipMemcpy(soPtr, oPtr, numOfSubArrs * sizeof(Nd4jLong), hipMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("tadOffsets copy failed", res); auto ssPtr = ConstantHelper::getInstance()->replicatePointer(sPtr, shape::shapeInfoByteLength(subArrRank)); ConstantDataBuffer shapesBuffer(sPtr, ssPtr, shape::shapeInfoLength(subArrRank) * sizeof(Nd4jLong), DataType::INT64); ConstantDataBuffer offsetsBuffer(oPtr, soPtr, numOfSubArrs * sizeof(Nd4jLong), DataType::INT64); TadPack t(shapesBuffer, offsetsBuffer, numOfSubArrs); _cache[deviceId][descriptor] = t; TadPack &r = _cache[deviceId][descriptor]; _mutex.unlock(); delete[] shapeInfo; return r; } else { TadPack &r = _cache[deviceId][descriptor]; _mutex.unlock(); return r; } } nd4j::ConstantTadHelper* nd4j::ConstantTadHelper::_INSTANCE = 0; }
ac429924fe76dc7a0bd938c3ca110fd36b15700c.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "../ConstantTadHelper.h" #include <TAD.h> #include <ConstantHelper.h> #include <exceptions/cuda_exception.h> #include <execution/LaunchContext.h> #include <ShapeUtils.h> namespace nd4j { ConstantTadHelper::ConstantTadHelper() { auto numDevices = ConstantHelper::getNumberOfDevices(); for (int e = 0; e < numDevices; e++) { std::map<TadDescriptor, TadPack> pack; _cache.emplace_back(pack); } } ConstantTadHelper* ConstantTadHelper::getInstance() { if (!_INSTANCE) _INSTANCE = new ConstantTadHelper(); return _INSTANCE; } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, int dimension, const bool keepUnitiesInShape) { return tadForDimensions(originalShape, &dimension, 1, keepUnitiesInShape); } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, const std::vector<int> &dimensions, const bool keepUnitiesInShape) { return tadForDimensions(originalShape, const_cast<int *>(dimensions.data()), dimensions.size(), keepUnitiesInShape); } TadPack& ConstantTadHelper::tadForDimensions(Nd4jLong *originalShape, int* dimensions, int dimLength, const bool keepUnitiesInShape) { TadDescriptor tadDescriptor(originalShape, dimensions, dimLength, keepUnitiesInShape); return tadForDimensions(tadDescriptor); } TadPack& ConstantTadHelper::tadForDimensions(ShapeDescriptor &descriptor, std::vector<int> &dimensions, const bool keepUnitiesInShape) { TadDescriptor tadDescriptor(descriptor, dimensions, keepUnitiesInShape); return tadForDimensions(tadDescriptor); } TadPack& ConstantTadHelper::tadForDimensions(TadDescriptor &descriptor) { const int deviceId = ConstantHelper::getCurrentDevice(); _mutex.lock(); if (_cache[deviceId].count(descriptor) == 0) { const auto shapeInfo = descriptor.originalShape().toShapeInfo(); const int rank = shape::rank(shapeInfo); const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, descriptor.axis()); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(shapeInfo, dimsToExclude); const int subArrRank = (rank == dimsToExclude.size() || descriptor.areUnitiesinShape()) ? 
rank : rank - dimsToExclude.size(); auto sPtr = new Nd4jLong[shape::shapeInfoLength(subArrRank)]; auto oPtr = new Nd4jLong[numOfSubArrs]; if (numOfSubArrs > 0) shape::calcSubArrShapeAndOffsets(shapeInfo, numOfSubArrs, dimsToExclude.size(), dimsToExclude.data(), sPtr, oPtr, descriptor.areUnitiesinShape()); Nd4jPointer soPtr; auto res = cudaMalloc(reinterpret_cast<void**>(&soPtr), numOfSubArrs * sizeof(Nd4jLong)); if (res != 0) throw cuda_exception::build("Memory allocation for tadOffsets failed", res); res = cudaMemcpy(soPtr, oPtr, numOfSubArrs * sizeof(Nd4jLong), cudaMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("tadOffsets copy failed", res); auto ssPtr = ConstantHelper::getInstance()->replicatePointer(sPtr, shape::shapeInfoByteLength(subArrRank)); ConstantDataBuffer shapesBuffer(sPtr, ssPtr, shape::shapeInfoLength(subArrRank) * sizeof(Nd4jLong), DataType::INT64); ConstantDataBuffer offsetsBuffer(oPtr, soPtr, numOfSubArrs * sizeof(Nd4jLong), DataType::INT64); TadPack t(shapesBuffer, offsetsBuffer, numOfSubArrs); _cache[deviceId][descriptor] = t; TadPack &r = _cache[deviceId][descriptor]; _mutex.unlock(); delete[] shapeInfo; return r; } else { TadPack &r = _cache[deviceId][descriptor]; _mutex.unlock(); return r; } } nd4j::ConstantTadHelper* nd4j::ConstantTadHelper::_INSTANCE = 0; }
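// --- Illustrative standalone sketch (placeholder types, not nd4j code) ----
// tadForDimensions above memoizes one TadPack per (device, descriptor) pair:
// a vector with one map per device, a mutex around the lookup-or-build, and
// a reference returned into the cache. The host sketch below shows that
// caching pattern with invented placeholder types (DeviceCache, an int key,
// a string payload); the real helper keys TadPack by TadDescriptor.
#include <cstdio>
#include <map>
#include <mutex>
#include <string>
#include <vector>

class DeviceCache {
 public:
  explicit DeviceCache(int numDevices) : _cache(numDevices) {}

  // Returns a reference into the per-device map, building the value once.
  // Entries are never erased, so the reference stays valid after unlock,
  // which is the same property the TAD cache relies on.
  const std::string& get(int deviceId, int key) {
    std::lock_guard<std::mutex> lock(_mutex);
    auto& perDevice = _cache[deviceId];
    auto it = perDevice.find(key);
    if (it == perDevice.end()) {
      // the expensive build happens only on the first request for this key
      it = perDevice.emplace(key, "pack-for-" + std::to_string(key)).first;
    }
    return it->second;
  }

 private:
  std::mutex _mutex;
  std::vector<std::map<int, std::string>> _cache;  // one map per device
};

int main() {
  DeviceCache cache(2);
  std::printf("%s\n", cache.get(0, 42).c_str());  // built on first use
  std::printf("%s\n", cache.get(0, 42).c_str());  // served from the cache
  return 0;
}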
a1e996a1573f8ffc49a9662cb64cfb938ef7a6a8.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <sstream> #include <string> #include <cstring> #include <sys/stat.h> //for filesize #include <vector> #include <stdlib.h> #include <stdio.h> #include <iomanip> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" using namespace std; #define BLOCK_SIZE_BIT 128 #define BLOCK_SIZE_CHAR 16 typedef struct State { int bytes[4][4]; }State; struct block { unsigned char text[BLOCK_SIZE_CHAR]; }; __global__ void ByteSub(int A[][4], int B[][4]) { const int Matrix_ByteSub[][16] = { { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 }, { 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 }, { 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 }, { 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 }, { 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 }, { 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf }, { 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 }, { 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 }, { 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 }, { 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb }, { 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 }, { 0xe7, 0xcb, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 }, { 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a }, { 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e }, { 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe8, 0xce, 0x55, 0x28, 0xdf }, { 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 } }; int i = threadIdx.x; int j = threadIdx.y; if (i < 4) { if (j < 4) { //get the ASCII value as well as its decimal value int byteTemp = A[i][j]; //get the leftmost 4 bits aka the COLUMN byteTemp = A[i][j]; byteTemp = (byteTemp >> 4) & ((1 << 4) - 1); //leftmost 4 bits int column = byteTemp; //get the rightmost 4 bits aka the ROW byteTemp = A[i][j]; byteTemp = (byteTemp >> 0) & ((1 << 4) - 1); //rightmost 4 bits int row = byteTemp; //set the original bytes on the passed in matrix to the new bytes B[i][j] = Matrix_ByteSub[column][row]; } } } __global__ void ShiftRow(int A[][4], int B[][4]) { //NEED TO GET A INTO B int i = threadIdx.x; int j = threadIdx.y; int k = threadIdx.y; if (i < 4) { unsigned char tempBytes[4]; if (j < 4) { //perform the left shift as dependent upon the row tempBytes[j] = A[i][(j + i) % 4]; //B[i][j] = tempBytes[j]; } if (k < 4) { A[i][k] = tempBytes[k]; //B[i][k] = A[i][k]; } } for(int m = 0; m < 4; m ++) { for(int n = 0; n < 4; n++) { B[m][n]=A[m][n]; } } } __device__ unsigned char gmul(unsigned char a, unsigned char b) { //Original Author: Sam Trenholme //GF(2^8) multiplication/addition unsigned char p = 0; // the product of the multiplication unsigned char counter; unsigned char hi_bit_set; for (counter = 0; counter < 8; counter++) { // if b is odd, then add (XOR) the corresponding a to p (final product = sum of 
all a's corresponding to odd b's) if ((b & 1) == 1) { p ^= a; } // GF modulo: if a >= 128, then it will overflow when shifted left, so reduce hi_bit_set = (a & 0x80); a <<= 1; // if a's hi bit had a value of one prior to this rotation, exclusive or a with the hexadecimal number 0x1b // 0x1b = x^4 + x^3 + x + 1 of GF(2^8) irreducible polynomial if (hi_bit_set == 0x80) { a ^= 0x1b; } // rotate b one bit to the right, discarding the low bit, and making the high (eighth from left) bit have a value of zero b >>= 1; } return p; } __global__ void MixColumn(int A[][4], int B[][4]) { unsigned char col[4]; unsigned char copyCol[4]; int i = threadIdx.x; //int j = threadIdx.y; //int k = threadIdx.y; /* if (i < 4) { if (j < 4) { col[j] = A[j][i]; } // apply the mixColumn on one column unsigned char copyCol[4]; if (k < 4) { copyCol[k] = col[k]; } col[0] = gmul(copyCol[0], 2) ^ gmul(copyCol[1], 3) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 1); col[1] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 2) ^ gmul(copyCol[2], 3) ^ gmul(copyCol[3], 1); col[2] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 2) ^ gmul(copyCol[3], 3); col[3] = gmul(copyCol[0], 3) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 2); // put the values back into the state if (j < 4) { B[j][i] = col[j]; } } */ if (i < 4) { for(int m = 0; m < 4; m++) { copyCol[m] = A[m][i]; } // apply the mixColumn on one column col[0] = gmul(copyCol[0], 2) ^ gmul(copyCol[1], 3) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 1); col[1] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 2) ^ gmul(copyCol[2], 3) ^ gmul(copyCol[3], 1); col[2] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 2) ^ gmul(copyCol[3], 3); col[3] = gmul(copyCol[0], 3) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 2); // put the values back into the state for(int n = 0; n < 4; n++) { B[n][i] = col[n]; } } } int main() { int i, j; dim3 threadsPerBlock(4, 4); int test_matrix[4][4] = { { 0x00, 0x12, 0x0c, 0x08 }, { 0x04, 0x04, 0x00, 0x23 }, { 0x12, 0x12, 0x13, 0x19 }, { 0x14, 0x00, 0x11, 0x19 } }; State test; for (int i = 0; i < 4; i++) { for (int j =0; j < 4; j++) { test.bytes[i][j] = test_matrix[i][j]; } } int BS[4][4]; int SR[4][4]; int MC[4][4]; int(*d_test)[4]; int(*d_BS)[4]; int(*d_SR)[4]; int(*d_MC)[4]; hipMalloc((void**)&d_test, (4 * 4) * sizeof(int)); hipMalloc((void**)&d_BS, (4 * 4) * sizeof(int)); hipMalloc((void**)&d_SR, (4 * 4) * sizeof(int)); hipMalloc((void**)&d_MC, (4 * 4) * sizeof(int)); hipMemcpy(d_test, &test, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_BS, BS, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_SR, SR, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_MC, MC, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); /*------------------BYTESUB--------------------*/ hipLaunchKernelGGL(( ByteSub) , dim3(4), dim3(threadsPerBlock) , 0, 0, d_test, d_BS); hipMemcpy(&test, d_test, (4 * 4) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(BS, d_BS, (4 * 4) * sizeof(int), hipMemcpyDeviceToHost); /*------------------SHIFTROW--------------------*/ State bytesub; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { bytesub.bytes[i][j] = BS[i][j]; } } int(*d_bytesub)[4]; hipMalloc((void**)&d_bytesub, (4 * 4) * sizeof(int)); hipMemcpy(d_bytesub, &bytesub, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_SR, SR, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( ShiftRow), dim3(4), dim3(threadsPerBlock) , 0, 0, d_bytesub, d_SR); hipMemcpy(&bytesub, d_bytesub, (4 * 4) * 
sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(SR, d_SR, (4 * 4) * sizeof(int), hipMemcpyDeviceToHost); /*------------------MIXCOLUMN--------------------*/ State shiftrow; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { shiftrow.bytes[i][j] = SR[i][j]; } } int(*d_shiftrow)[4]; hipMalloc((void**)&d_shiftrow, (4 * 4) * sizeof(int)); hipMemcpy(d_shiftrow, &shiftrow, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_SR, SR, (4 * 4) * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( MixColumn) , dim3(4), dim3(threadsPerBlock) , 0, 0, d_shiftrow, d_MC); hipMemcpy(&shiftrow, d_shiftrow, (4 * 4) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(MC, d_MC, (4 * 4) * sizeof(int), hipMemcpyDeviceToHost); printf("test = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", test.bytes[i][j]); cout << hex << test.bytes[i][j] << endl; } printf("\n\n"); } printf("BS = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", BS[i][j]); cout << hex << BS[i][j] << endl; } printf("\n\n"); } printf("SR = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", SR[i][j]); cout << hex << SR[i][j] << endl; } printf("\n\n"); } printf("MC = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", MC[i][j]); cout << hex << MC[i][j] << endl; } printf("\n\n"); } hipFree(d_test); hipFree(d_BS); hipFree(d_SR); hipFree(d_MC); return 0; }
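// --- Illustrative standalone sketch --------------------------------------
// The ByteSub kernel above splits each byte into its high and low nibble and
// uses them to index the S-box (the kernel names them "column" and "row",
// but the lookup Matrix_ByteSub[high][low] is the usual AES SubBytes
// indexing). The host check below reproduces only the single S-box row
// needed for one example value; it is a sketch, not part of the kernel.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned char sboxRow5[16] = {  // row 0x5_ of the AES S-box
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
    0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf };

  const unsigned char in = 0x53;
  const unsigned char hi = (in >> 4) & 0x0f;  // 0x5 -> selects the S-box row
  const unsigned char lo = in & 0x0f;         // 0x3 -> selects the column
  assert(hi == 0x5);
  const unsigned char out = sboxRow5[lo];
  std::printf("SubBytes(0x%02x) = 0x%02x\n", in, out);  // prints 0xed
  return 0;
}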
a1e996a1573f8ffc49a9662cb64cfb938ef7a6a8.cu
#include <iostream> #include <fstream> #include <sstream> #include <string> #include <cstring> #include <sys/stat.h> //for filesize #include <vector> #include <stdlib.h> #include <stdio.h> #include <iomanip> #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; #define BLOCK_SIZE_BIT 128 #define BLOCK_SIZE_CHAR 16 typedef struct State { int bytes[4][4]; }State; struct block { unsigned char text[BLOCK_SIZE_CHAR]; }; __global__ void ByteSub(int A[][4], int B[][4]) { const int Matrix_ByteSub[][16] = { { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 }, { 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 }, { 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 }, { 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 }, { 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 }, { 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf }, { 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 }, { 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 }, { 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 }, { 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb }, { 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 }, { 0xe7, 0xcb, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 }, { 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a }, { 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e }, { 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe8, 0xce, 0x55, 0x28, 0xdf }, { 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 } }; int i = threadIdx.x; int j = threadIdx.y; if (i < 4) { if (j < 4) { //get the ASCII value as well as its decimal value int byteTemp = A[i][j]; //get the leftmost 4 bits aka the COLUMN byteTemp = A[i][j]; byteTemp = (byteTemp >> 4) & ((1 << 4) - 1); //leftmost 4 bits int column = byteTemp; //get the rightmost 4 bits aka the ROW byteTemp = A[i][j]; byteTemp = (byteTemp >> 0) & ((1 << 4) - 1); //rightmost 4 bits int row = byteTemp; //set the original bytes on the passed in matrix to the new bytes B[i][j] = Matrix_ByteSub[column][row]; } } } __global__ void ShiftRow(int A[][4], int B[][4]) { //NEED TO GET A INTO B int i = threadIdx.x; int j = threadIdx.y; int k = threadIdx.y; if (i < 4) { unsigned char tempBytes[4]; if (j < 4) { //perform the left shift as dependent upon the row tempBytes[j] = A[i][(j + i) % 4]; //B[i][j] = tempBytes[j]; } if (k < 4) { A[i][k] = tempBytes[k]; //B[i][k] = A[i][k]; } } for(int m = 0; m < 4; m ++) { for(int n = 0; n < 4; n++) { B[m][n]=A[m][n]; } } } __device__ unsigned char gmul(unsigned char a, unsigned char b) { //Original Author: Sam Trenholme //GF(2^8) multiplication/addition unsigned char p = 0; // the product of the multiplication unsigned char counter; unsigned char hi_bit_set; for (counter = 0; counter < 8; counter++) { // if b is odd, then add (XOR) the corresponding a to p (final product = sum of all a's corresponding to odd b's) if ((b & 1) == 1) { p ^= a; 
} // GF modulo: if a >= 128, then it will overflow when shifted left, so reduce hi_bit_set = (a & 0x80); a <<= 1; // if a's hi bit had a value of one prior to this rotation, exclusive or a with the hexadecimal number 0x1b // 0x1b = x^4 + x^3 + x + 1 of GF(2^8) irreducible polynomial if (hi_bit_set == 0x80) { a ^= 0x1b; } // rotate b one bit to the right, discarding the low bit, and making the high (eighth from left) bit have a value of zero b >>= 1; } return p; } __global__ void MixColumn(int A[][4], int B[][4]) { unsigned char col[4]; unsigned char copyCol[4]; int i = threadIdx.x; //int j = threadIdx.y; //int k = threadIdx.y; /* if (i < 4) { if (j < 4) { col[j] = A[j][i]; } // apply the mixColumn on one column unsigned char copyCol[4]; if (k < 4) { copyCol[k] = col[k]; } col[0] = gmul(copyCol[0], 2) ^ gmul(copyCol[1], 3) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 1); col[1] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 2) ^ gmul(copyCol[2], 3) ^ gmul(copyCol[3], 1); col[2] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 2) ^ gmul(copyCol[3], 3); col[3] = gmul(copyCol[0], 3) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 2); // put the values back into the state if (j < 4) { B[j][i] = col[j]; } } */ if (i < 4) { for(int m = 0; m < 4; m++) { copyCol[m] = A[m][i]; } // apply the mixColumn on one column col[0] = gmul(copyCol[0], 2) ^ gmul(copyCol[1], 3) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 1); col[1] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 2) ^ gmul(copyCol[2], 3) ^ gmul(copyCol[3], 1); col[2] = gmul(copyCol[0], 1) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 2) ^ gmul(copyCol[3], 3); col[3] = gmul(copyCol[0], 3) ^ gmul(copyCol[1], 1) ^ gmul(copyCol[2], 1) ^ gmul(copyCol[3], 2); // put the values back into the state for(int n = 0; n < 4; n++) { B[n][i] = col[n]; } } } int main() { int i, j; dim3 threadsPerBlock(4, 4); int test_matrix[4][4] = { { 0x00, 0x12, 0x0c, 0x08 }, { 0x04, 0x04, 0x00, 0x23 }, { 0x12, 0x12, 0x13, 0x19 }, { 0x14, 0x00, 0x11, 0x19 } }; State test; for (int i = 0; i < 4; i++) { for (int j =0; j < 4; j++) { test.bytes[i][j] = test_matrix[i][j]; } } int BS[4][4]; int SR[4][4]; int MC[4][4]; int(*d_test)[4]; int(*d_BS)[4]; int(*d_SR)[4]; int(*d_MC)[4]; cudaMalloc((void**)&d_test, (4 * 4) * sizeof(int)); cudaMalloc((void**)&d_BS, (4 * 4) * sizeof(int)); cudaMalloc((void**)&d_SR, (4 * 4) * sizeof(int)); cudaMalloc((void**)&d_MC, (4 * 4) * sizeof(int)); cudaMemcpy(d_test, &test, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_BS, BS, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_MC, MC, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); /*------------------BYTESUB--------------------*/ ByteSub <<<4, threadsPerBlock >>>(d_test, d_BS); cudaMemcpy(&test, d_test, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(BS, d_BS, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); /*------------------SHIFTROW--------------------*/ State bytesub; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { bytesub.bytes[i][j] = BS[i][j]; } } int(*d_bytesub)[4]; cudaMalloc((void**)&d_bytesub, (4 * 4) * sizeof(int)); cudaMemcpy(d_bytesub, &bytesub, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); ShiftRow<<<4, threadsPerBlock >>>(d_bytesub, d_SR); cudaMemcpy(&bytesub, d_bytesub, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(SR, d_SR, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); 
/*------------------MIXCOLUMN--------------------*/ State shiftrow; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { shiftrow.bytes[i][j] = SR[i][j]; } } int(*d_shiftrow)[4]; cudaMalloc((void**)&d_shiftrow, (4 * 4) * sizeof(int)); cudaMemcpy(d_shiftrow, &shiftrow, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice); MixColumn <<<4, threadsPerBlock >>>(d_shiftrow, d_MC); cudaMemcpy(&shiftrow, d_shiftrow, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(MC, d_MC, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost); printf("test = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", test.bytes[i][j]); cout << hex << test.bytes[i][j] << endl; } printf("\n\n"); } printf("BS = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", BS[i][j]); cout << hex << BS[i][j] << endl; } printf("\n\n"); } printf("SR = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", SR[i][j]); cout << hex << SR[i][j] << endl; } printf("\n\n"); } printf("MC = \n"); for (i = 0; i<4; i++) { for (j = 0; j<4; j++) { //printf("%d ", MC[i][j]); cout << hex << MC[i][j] << endl; } printf("\n\n"); } cudaFree(d_test); cudaFree(d_BS); cudaFree(d_SR); cudaFree(d_MC); return 0; }
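// --- Illustrative standalone sketch --------------------------------------
// Host-side check of the GF(2^8) multiply described in the gmul comment
// above. The function body mirrors the device routine; the test value
// {57}x{83}={c1} is the worked example from FIPS-197, used here only to
// sanity-check the shift/XOR reduction by 0x1b.
#include <cassert>
#include <cstdio>

static unsigned char gmul_host(unsigned char a, unsigned char b) {
  unsigned char p = 0;
  for (int i = 0; i < 8; ++i) {
    if (b & 1) p ^= a;                     // add a when the low bit of b is set
    const unsigned char carry = a & 0x80;  // would x*a overflow 8 bits?
    a <<= 1;
    if (carry) a ^= 0x1b;                  // reduce modulo x^8+x^4+x^3+x+1
    b >>= 1;
  }
  return p;
}

int main() {
  assert(gmul_host(0x57, 0x83) == 0xc1);   // FIPS-197 worked example
  // MixColumn only multiplies by 1, 2 and 3, and 3*x == (2*x) ^ x in GF(2^8):
  const unsigned char x = 0xd4;
  assert(gmul_host(x, 3) == (unsigned char)(gmul_host(x, 2) ^ x));
  std::printf("gmul checks passed\n");
  return 0;
}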
d1e272ab1a880620d60a6f64a51a057952c80226.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Radix { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer(){ static PerformanceTimer timer; return timer; } int *dev_Data[2]; int *dev_bArray; int *dev_eArray; int *dev_fArray; int *dev_tArray; int *dev_dArray; __global__ void cudaSweepUp(int n, int d, int *data) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int interval_length = 1 << (d + 1); if (index >= n) return; //int idx1 = index * interval_length + (1 << (d + 1)) - 1; //int idx2 = index * interval_length + (1 << d) - 1; data[index * interval_length + (1 << (d + 1)) - 1] += data[index * interval_length + (1 << d) - 1]; } __global__ void cudaSweepDown(int n, int d, int *data) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int interval_length = 1 << (d + 1); // k from 0 to n-1 if (index >= n) return; int temp = data[index * interval_length + (1 << d) - 1]; data[index * interval_length + (1 << d) - 1] = data[index * interval_length + (1 << (d + 1)) - 1]; data[index * interval_length + (1 << (d + 1)) - 1] += temp; } __global__ void cudaGetBEArray(int pass, int *idata, int *odata1, int *odata2, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata1[index] = (idata[index] >> pass) & 1; odata2[index] = odata1[index] ^ 1; } __global__ void cudaGetTArray(int *idata, int *e, int *odata, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int totalFalses = idata[n - 1] + e[n - 1]; if (index >= n) return; odata[index] = index - idata[index] + totalFalses; } __global__ void cudaGetDArray(int *idata1, int *idata2, int *idata3, int *odata, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata[index] = idata1[index] ? 
idata2[index] : idata3[index]; } __global__ void cudaGetResult(int *idata, int *odata, int *data, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata[idata[index]] = data[index]; } void RadixSort(int n, int *odata, int *idata){ int blockSize = 512; int blockNum; int celllog = ilog2ceil(n); int pow2len = 1 << celllog; int pout = 1; hipMalloc((void**)&dev_Data[0], n * sizeof(int)); hipMalloc((void**)&dev_Data[1], n * sizeof(int)); hipMalloc((void**)&dev_bArray, n * sizeof(int)); hipMalloc((void**)&dev_eArray, pow2len * sizeof(int)); hipMalloc((void**)&dev_fArray, pow2len * sizeof(int)); hipMalloc((void**)&dev_tArray, n * sizeof(int)); hipMalloc((void**)&dev_dArray, n * sizeof(int)); hipMemset(dev_eArray, 0, pow2len * sizeof(int)); hipMemcpy(dev_Data[0], idata, sizeof(int) * n, hipMemcpyHostToDevice); checkCUDAError("hipMemcpy to device failed!"); checkCUDAError("hipMalloc failed!"); int max_num = 0; for (int i = 0; i < n; i++) if (idata[i] > max_num) max_num = idata[i]; timer().startGpuTimer(); int pass = 0; while (true){ int pin = pout ^ 1; if ((max_num >> pass) == 0) break; blockNum = n / blockSize + 1; cudaGetBEArray << <blockNum, blockSize >> >(pass, dev_Data[pin], dev_bArray, dev_eArray, n); hipMemcpy(dev_fArray, dev_eArray, pow2len * sizeof(int), hipMemcpyDeviceToDevice); checkCUDAError("hipMemcpy device to device failed!"); for (int d = 0; d <= celllog - 1; d++){ int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; cudaSweepUp << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_fArray); } hipMemset(dev_fArray + pow2len - 1, 0, sizeof(int)); checkCUDAError("hipMemset failed!"); for (int d = celllog - 1; d >= 0; d--) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; cudaSweepDown << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_fArray); } blockNum = n / blockSize + 1; cudaGetTArray << <blockNum, blockSize >> > (dev_fArray, dev_eArray, dev_tArray, n); cudaGetDArray << <blockNum, blockSize >> >(dev_bArray, dev_tArray, dev_fArray, dev_dArray, n); cudaGetResult << <blockNum, blockSize >> >(dev_dArray, dev_Data[pout], dev_Data[pin], n); pass++; pout ^= 1; } timer().endGpuTimer(); hipMemcpy(odata, dev_Data[pout ^ 1], sizeof(int) * n, hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy to host failed!"); hipFree(dev_Data[0]); hipFree(dev_Data[1]); hipFree(dev_bArray); hipFree(dev_eArray); hipFree(dev_fArray); hipFree(dev_tArray); hipFree(dev_dArray); } } }
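// --- Illustrative standalone sketch --------------------------------------
// cudaSweepUp and cudaSweepDown above implement the two phases of a Blelloch
// (work-efficient) exclusive scan over the power-of-two f buffer. The host
// program below runs the same up-sweep/down-sweep serially, one loop level
// per GPU launch, and checks the result against a plain running sum; it is
// the textbook algorithm written for clarity, not the kernel code.
#include <cassert>
#include <cstdio>
#include <vector>

static void blellochExclusiveScan(std::vector<int>& a) {
  const int n = static_cast<int>(a.size());      // must be a power of two
  for (int d = 1; d < n; d <<= 1)                // up-sweep (reduce)
    for (int i = 0; i < n; i += 2 * d)
      a[i + 2 * d - 1] += a[i + d - 1];
  a[n - 1] = 0;                                  // clear the root
  for (int d = n >> 1; d >= 1; d >>= 1)          // down-sweep
    for (int i = 0; i < n; i += 2 * d) {
      const int t = a[i + d - 1];
      a[i + d - 1] = a[i + 2 * d - 1];
      a[i + 2 * d - 1] += t;
    }
}

int main() {
  std::vector<int> e = {1, 0, 1, 1, 0, 0, 1, 0};  // e.g. the "e" flags above
  std::vector<int> f = e;
  blellochExclusiveScan(f);
  int running = 0;
  for (size_t i = 0; i < e.size(); ++i) {         // compare with a serial scan
    assert(f[i] == running);
    running += e[i];
  }
  std::printf("exclusive scan of the flags: ");
  for (int v : f) std::printf("%d ", v);
  std::printf("\n");
  return 0;
}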
d1e272ab1a880620d60a6f64a51a057952c80226.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Radix { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer(){ static PerformanceTimer timer; return timer; } int *dev_Data[2]; int *dev_bArray; int *dev_eArray; int *dev_fArray; int *dev_tArray; int *dev_dArray; __global__ void cudaSweepUp(int n, int d, int *data) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int interval_length = 1 << (d + 1); if (index >= n) return; //int idx1 = index * interval_length + (1 << (d + 1)) - 1; //int idx2 = index * interval_length + (1 << d) - 1; data[index * interval_length + (1 << (d + 1)) - 1] += data[index * interval_length + (1 << d) - 1]; } __global__ void cudaSweepDown(int n, int d, int *data) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int interval_length = 1 << (d + 1); // k from 0 to n-1 if (index >= n) return; int temp = data[index * interval_length + (1 << d) - 1]; data[index * interval_length + (1 << d) - 1] = data[index * interval_length + (1 << (d + 1)) - 1]; data[index * interval_length + (1 << (d + 1)) - 1] += temp; } __global__ void cudaGetBEArray(int pass, int *idata, int *odata1, int *odata2, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata1[index] = (idata[index] >> pass) & 1; odata2[index] = odata1[index] ^ 1; } __global__ void cudaGetTArray(int *idata, int *e, int *odata, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int totalFalses = idata[n - 1] + e[n - 1]; if (index >= n) return; odata[index] = index - idata[index] + totalFalses; } __global__ void cudaGetDArray(int *idata1, int *idata2, int *idata3, int *odata, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata[index] = idata1[index] ? 
idata2[index] : idata3[index]; } __global__ void cudaGetResult(int *idata, int *odata, int *data, int n){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) return; odata[idata[index]] = data[index]; } void RadixSort(int n, int *odata, int *idata){ int blockSize = 512; int blockNum; int celllog = ilog2ceil(n); int pow2len = 1 << celllog; int pout = 1; cudaMalloc((void**)&dev_Data[0], n * sizeof(int)); cudaMalloc((void**)&dev_Data[1], n * sizeof(int)); cudaMalloc((void**)&dev_bArray, n * sizeof(int)); cudaMalloc((void**)&dev_eArray, pow2len * sizeof(int)); cudaMalloc((void**)&dev_fArray, pow2len * sizeof(int)); cudaMalloc((void**)&dev_tArray, n * sizeof(int)); cudaMalloc((void**)&dev_dArray, n * sizeof(int)); cudaMemset(dev_eArray, 0, pow2len * sizeof(int)); cudaMemcpy(dev_Data[0], idata, sizeof(int) * n, cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy to device failed!"); checkCUDAError("cudaMalloc failed!"); int max_num = 0; for (int i = 0; i < n; i++) if (idata[i] > max_num) max_num = idata[i]; timer().startGpuTimer(); int pass = 0; while (true){ int pin = pout ^ 1; if ((max_num >> pass) == 0) break; blockNum = n / blockSize + 1; cudaGetBEArray << <blockNum, blockSize >> >(pass, dev_Data[pin], dev_bArray, dev_eArray, n); cudaMemcpy(dev_fArray, dev_eArray, pow2len * sizeof(int), cudaMemcpyDeviceToDevice); checkCUDAError("cudaMemcpy device to device failed!"); for (int d = 0; d <= celllog - 1; d++){ int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; cudaSweepUp << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_fArray); } cudaMemset(dev_fArray + pow2len - 1, 0, sizeof(int)); checkCUDAError("cudaMemset failed!"); for (int d = celllog - 1; d >= 0; d--) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; cudaSweepDown << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_fArray); } blockNum = n / blockSize + 1; cudaGetTArray << <blockNum, blockSize >> > (dev_fArray, dev_eArray, dev_tArray, n); cudaGetDArray << <blockNum, blockSize >> >(dev_bArray, dev_tArray, dev_fArray, dev_dArray, n); cudaGetResult << <blockNum, blockSize >> >(dev_dArray, dev_Data[pout], dev_Data[pin], n); pass++; pout ^= 1; } timer().endGpuTimer(); cudaMemcpy(odata, dev_Data[pout ^ 1], sizeof(int) * n, cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy to host failed!"); cudaFree(dev_Data[0]); cudaFree(dev_Data[1]); cudaFree(dev_bArray); cudaFree(dev_eArray); cudaFree(dev_fArray); cudaFree(dev_tArray); cudaFree(dev_dArray); } } }
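// --- Illustrative standalone sketch --------------------------------------
// Host-side walk-through of the split step the kernels above perform for
// each bit: b holds the current bit, e = !b, f = exclusive scan of e,
// t[i] = i - f[i] + totalFalses, and every element scatters to
// d[i] = b[i] ? t[i] : f[i]. Written serially to show the address math only;
// it is not a replacement for the per-pass kernels.
#include <cassert>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> in = {4, 7, 2, 6, 3, 5, 1, 0};
  const int pass = 0;                              // split on bit 0 only
  const int n = static_cast<int>(in.size());

  std::vector<int> b(n), e(n), f(n), t(n), d(n), out(n);
  for (int i = 0; i < n; ++i) { b[i] = (in[i] >> pass) & 1; e[i] = b[i] ^ 1; }

  int running = 0;                                 // exclusive scan of e
  for (int i = 0; i < n; ++i) { f[i] = running; running += e[i]; }
  const int totalFalses = f[n - 1] + e[n - 1];

  for (int i = 0; i < n; ++i) {
    t[i] = i - f[i] + totalFalses;                 // destination if bit is 1
    d[i] = b[i] ? t[i] : f[i];                     // chosen destination
    out[d[i]] = in[i];                             // the scatter in cudaGetResult
  }

  // All even values land first, odd values after, each group in stable order.
  for (int i = 0; i < n; ++i) std::printf("%d ", out[i]);
  std::printf("\n");
  assert(out[0] % 2 == 0 && out[n - 1] % 2 == 1);
  return 0;
}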
ac946c4fce310e82c80a20831b88a8bba5e1c7c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" /******************************************************************************/ __global__ void kernel_zset_pointer( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batch_offset) { output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda; //printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]); } /******************************************************************************/ // set pointer with variable size matrices, batch_offset becomes an array with accumulated sum of sizes // batch_offset[i] = sum( matrix_size[0], matrix_size[1], ..., matrix_size[i-1]) // batch_offset is usually the output of a prefix sum operation __global__ void kernel_zset_pointer_var( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t *lda, magma_int_t row, magma_int_t column, magma_int_t *batch_offset) { output_array[blockIdx.x] = input + batch_offset[blockIdx.x] + row + column * lda[blockIdx.x]; //printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]); } /***************************************************************************//** Purpose ------- convert consecutive stored variable to array stored for example the size of A is N*batchCount; N is the size of A(batch_offset) change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N Arguments ---------- @param[out] output_array Array of pointers, dimension (batchCount). Each is a COMPLEX_16 array A of DIMENSION ( lda, column ) on the GPU @param[in] input COMPLEX_16 array of dimension ( LDDA, N*batchCount ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] row INTEGER On entry, row specifies the number of rows of the matrix A. @param[in] column INTEGER On entry, column specifies the number of columns of the matrix A @param[in] batch_offset INTEGER The starting pointer of each matrix A in input arrray @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. 
*******************************************************************************/ extern "C" void magma_zset_pointer( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batch_offset, magma_int_t batchCount, magma_queue_t queue) { hipLaunchKernelGGL(( kernel_zset_pointer) , dim3(batchCount), dim3(1), 0, queue->cuda_stream() , output_array, input, lda, row, column, batch_offset); } /******************************************************************************/ extern "C" void magma_zset_pointer_var_cc( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t *lda, magma_int_t row, magma_int_t column, magma_int_t *batch_offset, magma_int_t batchCount, magma_queue_t queue) { hipLaunchKernelGGL(( kernel_zset_pointer_var) , dim3(batchCount), dim3(1), 0, queue->cuda_stream() , output_array, input, lda, row, column, batch_offset); } /******************************************************************************/ __global__ void zdisplace_pointers_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t lda, magma_int_t row, magma_int_t column) { magmaDoubleComplex *inpt = input_array[blockIdx.x]; output_array[blockIdx.x] = &inpt[row + column * lda]; } /******************************************************************************/ /* Variable pointer displacement kernels */ /******************************************************************************/ // variable leading dimension, constant row and column offsets __global__ void zdisplace_pointers_var_cc_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row < 0 || column < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row + column * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, constant row offset and variable column offsets __global__ void zdisplace_pointers_var_cv_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t *column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row < 0 || column[bid] < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row + column[bid] * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, variable row offset and constant column offsets __global__ void zdisplace_pointers_var_vc_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t *row, magma_int_t column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row[bid] < 0 || column < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row[bid] + column * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, variable row and column offsets __global__ void zdisplace_pointers_var_vv_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t* row, magma_int_t* column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[bid]; if(inpt == NULL || row[bid] < 0 || column[bid] < 0) output_array[bid] = NULL; else 
output_array[bid] = &inpt[ row[bid] + column[bid] * lda[bid] ]; } /***************************************************************************//** Purpose ------- compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda * column; Arguments ---------- @param[out] output_array Array of pointers, dimension (batchCount). Each pointer points to the new displacement of array A in input_array on the GPU @param[in] input_array Array of pointers, dimension (batchCount). Each is a COMPLEX_16 array A of DIMENSION ( lda, column ) on the GPU @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] row INTEGER On entry, row specifies the number of rows of the matrix A. @param[in] column INTEGER On entry, column specifies the number of columns of the matrix A @param[in] batch_offset INTEGER The starting pointer of each matrix A in input arrray @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magma_zdisplace_pointers(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { hipLaunchKernelGGL(( zdisplace_pointers_kernel) , dim3(batchCount), dim3(1), 0, queue->cuda_stream() , output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_cc(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda[i] * column; */ hipLaunchKernelGGL(( zdisplace_pointers_var_cc_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_cv(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t* column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda[i] * column[i]; */ hipLaunchKernelGGL(( zdisplace_pointers_var_cv_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_vc(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t *row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. 
output_array[i] = input_array[i] + row[i] + lda[i] * column; */ hipLaunchKernelGGL(( zdisplace_pointers_var_vc_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_vv(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t* row, magma_int_t* column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row[i] + lda[i] * column[i]; */ hipLaunchKernelGGL(( zdisplace_pointers_var_vv_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column); }
ac946c4fce310e82c80a20831b88a8bba5e1c7c2.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" /******************************************************************************/ __global__ void kernel_zset_pointer( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batch_offset) { output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda; //printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]); } /******************************************************************************/ // set pointer with variable size matrices, batch_offset becomes an array with accumulated sum of sizes // batch_offset[i] = sum( matrix_size[0], matrix_size[1], ..., matrix_size[i-1]) // batch_offset is usually the output of a prefix sum operation __global__ void kernel_zset_pointer_var( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t *lda, magma_int_t row, magma_int_t column, magma_int_t *batch_offset) { output_array[blockIdx.x] = input + batch_offset[blockIdx.x] + row + column * lda[blockIdx.x]; //printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]); } /***************************************************************************//** Purpose ------- convert consecutive stored variable to array stored for example the size of A is N*batchCount; N is the size of A(batch_offset) change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N Arguments ---------- @param[out] output_array Array of pointers, dimension (batchCount). Each is a COMPLEX_16 array A of DIMENSION ( lda, column ) on the GPU @param[in] input COMPLEX_16 array of dimension ( LDDA, N*batchCount ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] row INTEGER On entry, row specifies the number of rows of the matrix A. @param[in] column INTEGER On entry, column specifies the number of columns of the matrix A @param[in] batch_offset INTEGER The starting pointer of each matrix A in input arrray @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. 
*******************************************************************************/ extern "C" void magma_zset_pointer( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batch_offset, magma_int_t batchCount, magma_queue_t queue) { kernel_zset_pointer <<< batchCount, 1, 0, queue->cuda_stream() >>> (output_array, input, lda, row, column, batch_offset); } /******************************************************************************/ extern "C" void magma_zset_pointer_var_cc( magmaDoubleComplex **output_array, magmaDoubleComplex *input, magma_int_t *lda, magma_int_t row, magma_int_t column, magma_int_t *batch_offset, magma_int_t batchCount, magma_queue_t queue) { kernel_zset_pointer_var <<< batchCount, 1, 0, queue->cuda_stream() >>> (output_array, input, lda, row, column, batch_offset); } /******************************************************************************/ __global__ void zdisplace_pointers_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t lda, magma_int_t row, magma_int_t column) { magmaDoubleComplex *inpt = input_array[blockIdx.x]; output_array[blockIdx.x] = &inpt[row + column * lda]; } /******************************************************************************/ /* Variable pointer displacement kernels */ /******************************************************************************/ // variable leading dimension, constant row and column offsets __global__ void zdisplace_pointers_var_cc_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row < 0 || column < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row + column * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, constant row offset and variable column offsets __global__ void zdisplace_pointers_var_cv_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t *column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row < 0 || column[bid] < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row + column[bid] * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, variable row offset and constant column offsets __global__ void zdisplace_pointers_var_vc_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t *row, magma_int_t column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[blockIdx.x]; if(inpt == NULL || row[bid] < 0 || column < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[row[bid] + column * lda[blockIdx.x] ]; } /******************************************************************************/ // variable leading dimension, variable row and column offsets __global__ void zdisplace_pointers_var_vv_kernel(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t* row, magma_int_t* column) { const int bid = blockIdx.x; magmaDoubleComplex *inpt = input_array[bid]; if(inpt == NULL || row[bid] < 0 || column[bid] < 0) output_array[bid] = NULL; else output_array[bid] = &inpt[ row[bid] + column[bid] * lda[bid] ]; } 
/***************************************************************************//** Purpose ------- compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda * column; Arguments ---------- @param[out] output_array Array of pointers, dimension (batchCount). Each pointer points to the new displacement of array A in input_array on the GPU @param[in] input_array Array of pointers, dimension (batchCount). Each is a COMPLEX_16 array A of DIMENSION ( lda, column ) on the GPU @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] row INTEGER On entry, row specifies the number of rows of the matrix A. @param[in] column INTEGER On entry, column specifies the number of columns of the matrix A @param[in] batch_offset INTEGER The starting pointer of each matrix A in input arrray @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magma_zdisplace_pointers(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { zdisplace_pointers_kernel <<< batchCount, 1, 0, queue->cuda_stream() >>> (output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_cc(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda[i] * column; */ zdisplace_pointers_var_cc_kernel<<<batchCount, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_cv(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t row, magma_int_t* column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda[i] * column[i]; */ zdisplace_pointers_var_cv_kernel<<<batchCount, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_vc(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t *row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. 
output_array[i] = input_array[i] + row[i] + lda[i] * column; */ zdisplace_pointers_var_vc_kernel<<<batchCount, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column); } /******************************************************************************/ extern "C" void magma_zdisplace_pointers_var_vv(magmaDoubleComplex **output_array, magmaDoubleComplex **input_array, magma_int_t* lda, magma_int_t* row, magma_int_t* column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row[i] + lda[i] * column[i]; */ zdisplace_pointers_var_vv_kernel<<<batchCount, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column); }
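The kernel_zset_pointer kernels in this pair materialize an array of per-matrix base pointers directly on the device, replacing a host loop plus an extra host-to-device copy of the pointer array. A plain-CUDA sketch of the same idea for a fixed-size float batch (the names and the float element type are assumptions for illustration, not MAGMA API):

#include <cuda_runtime.h>

// One block per matrix: out[i] points at element (row, col) of batch matrix i,
// where matrices are stored back to back with stride batch_stride elements.
__global__ void set_batch_pointers(float** out, float* base, int lda,
                                   int row, int col, long long batch_stride)
{
    out[blockIdx.x] = base + blockIdx.x * batch_stride + row + (long long)col * lda;
}

// Typical launch for batchCount matrices of size lda x n stored contiguously:
//   set_batch_pointers<<<batchCount, 1, 0, stream>>>(d_Aarray, d_A, lda, 0, 0, (long long)lda * n);
// The hipify-generated file above expresses the equivalent launch as
//   hipLaunchKernelGGL(kernel, dim3(batchCount), dim3(1), 0, stream, args...).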
570059aa09c223863bebd809484b7c5ab6784d80.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaAlgorithm.hpp" #include "HelperFunctions.hpp" #include <cstdlib> #include <SFML/Graphics.hpp> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> __device__ void alignChannel(int& channelValue) { channelValue = (channelValue > 255) ? 255 : channelValue; channelValue = (channelValue < 0) ? 0 : channelValue; } __global__ void applyFilterOnCuda( const sf::Uint8* inputImageData, sf::Uint8* outputImageData, const std::size_t width, const std::size_t height, const float* filter, const std::size_t kernelSize) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int kernelMargin = static_cast<int>(kernelSize / 2); if (((x - kernelMargin) > 0 && (x + kernelMargin) < width) && ((y - kernelMargin) > 0 && (y + kernelMargin) < height)) { int newRedChannel{}, newGreenChannel{}, newBlueChannel{}; for (int kernelX = -kernelMargin; kernelX <= kernelMargin; ++kernelX) { for (int kernelY = -kernelMargin; kernelY <= kernelMargin; ++kernelY) { const auto kernelXIndex = kernelX + kernelMargin; const auto kernelYIndex = kernelY + kernelMargin; const auto kernelIndex = kernelXIndex * kernelSize + kernelYIndex; const auto kernelValue = filter[kernelIndex]; const auto pixel = &inputImageData[((x + kernelX) + (y + kernelY) * width) * 4]; newRedChannel += static_cast<int>(pixel[0] * kernelValue); newGreenChannel += static_cast<int>(pixel[1] * kernelValue); newBlueChannel += static_cast<int>(pixel[2] * kernelValue); } } alignChannel(newRedChannel); alignChannel(newGreenChannel); alignChannel(newBlueChannel); auto outPixel = &outputImageData[(x + y * width) * 4]; outPixel[0] = newRedChannel; outPixel[1] = newGreenChannel; outPixel[2] = newBlueChannel; } } void Cuda::applyFilter(sf::Image& image, const Filter::Kernel& filter) { thrust::host_vector<sf::Uint8> hostImageData{ image.getPixelsPtr(), image.getPixelsPtr() + calculateImageSize(image) }; thrust::device_vector<sf::Uint8> devImageData(calculateImageSize(image)); thrust::device_vector<sf::Uint8> devOutputImageData(calculateImageSize(image)); thrust::copy(hostImageData.begin(), hostImageData.end(), devImageData.begin()); thrust::device_vector<float> devKernel{}; for (const auto& filterRow : filter) { devKernel.insert(devKernel.end(), filterRow.begin(), filterRow.end()); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); dim3 threadsPerBlock(16, 16); dim3 numBlocks(static_cast<uint32_t>(ceil((float)image.getSize().x / threadsPerBlock.x)), static_cast<uint32_t>(ceil((float)image.getSize().y / threadsPerBlock.y))); hipLaunchKernelGGL(( applyFilterOnCuda), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devImageData.data().get(), devOutputImageData.data().get(), image.getSize().x, image.getSize().y, devKernel.data().get(), filter.size()); hipEventRecord(stop); hipDeviceSynchronize(); hipEventSynchronize(stop); float timeMs{}; hipEventElapsedTime(&timeMs, start, stop); std::cout << "[CUDA] (only CUDA calculations): " << timeMs << " ms" << std::endl; thrust::copy(devOutputImageData.begin(), devOutputImageData.end(), hostImageData.begin()); image.create(image.getSize().x, image.getSize().y, hostImageData.data()); }
570059aa09c223863bebd809484b7c5ab6784d80.cu
#include "CudaAlgorithm.hpp" #include "HelperFunctions.hpp" #include <cstdlib> #include <SFML/Graphics.hpp> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> __device__ void alignChannel(int& channelValue) { channelValue = (channelValue > 255) ? 255 : channelValue; channelValue = (channelValue < 0) ? 0 : channelValue; } __global__ void applyFilterOnCuda( const sf::Uint8* inputImageData, sf::Uint8* outputImageData, const std::size_t width, const std::size_t height, const float* filter, const std::size_t kernelSize) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int kernelMargin = static_cast<int>(kernelSize / 2); if (((x - kernelMargin) > 0 && (x + kernelMargin) < width) && ((y - kernelMargin) > 0 && (y + kernelMargin) < height)) { int newRedChannel{}, newGreenChannel{}, newBlueChannel{}; for (int kernelX = -kernelMargin; kernelX <= kernelMargin; ++kernelX) { for (int kernelY = -kernelMargin; kernelY <= kernelMargin; ++kernelY) { const auto kernelXIndex = kernelX + kernelMargin; const auto kernelYIndex = kernelY + kernelMargin; const auto kernelIndex = kernelXIndex * kernelSize + kernelYIndex; const auto kernelValue = filter[kernelIndex]; const auto pixel = &inputImageData[((x + kernelX) + (y + kernelY) * width) * 4]; newRedChannel += static_cast<int>(pixel[0] * kernelValue); newGreenChannel += static_cast<int>(pixel[1] * kernelValue); newBlueChannel += static_cast<int>(pixel[2] * kernelValue); } } alignChannel(newRedChannel); alignChannel(newGreenChannel); alignChannel(newBlueChannel); auto outPixel = &outputImageData[(x + y * width) * 4]; outPixel[0] = newRedChannel; outPixel[1] = newGreenChannel; outPixel[2] = newBlueChannel; } } void Cuda::applyFilter(sf::Image& image, const Filter::Kernel& filter) { thrust::host_vector<sf::Uint8> hostImageData{ image.getPixelsPtr(), image.getPixelsPtr() + calculateImageSize(image) }; thrust::device_vector<sf::Uint8> devImageData(calculateImageSize(image)); thrust::device_vector<sf::Uint8> devOutputImageData(calculateImageSize(image)); thrust::copy(hostImageData.begin(), hostImageData.end(), devImageData.begin()); thrust::device_vector<float> devKernel{}; for (const auto& filterRow : filter) { devKernel.insert(devKernel.end(), filterRow.begin(), filterRow.end()); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); dim3 threadsPerBlock(16, 16); dim3 numBlocks(static_cast<uint32_t>(ceil((float)image.getSize().x / threadsPerBlock.x)), static_cast<uint32_t>(ceil((float)image.getSize().y / threadsPerBlock.y))); applyFilterOnCuda<<<numBlocks, threadsPerBlock>>>( devImageData.data().get(), devOutputImageData.data().get(), image.getSize().x, image.getSize().y, devKernel.data().get(), filter.size()); cudaEventRecord(stop); cudaDeviceSynchronize(); cudaEventSynchronize(stop); float timeMs{}; cudaEventElapsedTime(&timeMs, start, stop); std::cout << "[CUDA] (only CUDA calculations): " << timeMs << " ms" << std::endl; thrust::copy(devOutputImageData.begin(), devOutputImageData.end(), hostImageData.begin()); image.create(image.getSize().x, image.getSize().y, hostImageData.data()); }
00d0310c7c8d166a5e573b5fe21275d54fb99296.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "interpolate_gpu.h" __global__ void three_nn_kernel_stack(int batch_size, int N, int M, const float *unknown, const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, float *dist2, int *idx) { // unknown: (N1 + N2 ..., 3) // unknown_batch_cnt: (batch_size), [N1, N2, ...] // known: (M1 + M2 ..., 3) // known_batch_cnt: (batch_size), [M1, M2, ...] // Return: // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors // idx: (N1 + N2 ..., 3) index of the three nearest neighbors int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N) return; int bs_idx = 0, pt_cnt = unknown_batch_cnt[0]; for (int k = 1; k < batch_size; k++){ if (pt_idx < pt_cnt) break; pt_cnt += unknown_batch_cnt[k]; bs_idx = k; } int cur_num_known_points = known_batch_cnt[bs_idx]; int known_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) known_batch_start_idx += known_batch_cnt[k]; known += known_batch_start_idx * 3; unknown += pt_idx * 3; dist2 += pt_idx * 3; idx += pt_idx * 3; float ux = unknown[0]; float uy = unknown[1]; float uz = unknown[2]; double best1 = 1e40, best2 = 1e40, best3 = 1e40; int besti1 = 0, besti2 = 0, besti3 = 0; for (int k = 0; k < cur_num_known_points; ++k) { float x = known[k * 3 + 0]; float y = known[k * 3 + 1]; float z = known[k * 3 + 2]; float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); if (d < best1) { best3 = best2; besti3 = besti2; best2 = best1; besti2 = besti1; best1 = d; besti1 = k; } else if (d < best2) { best3 = best2; besti3 = besti2; best2 = d; besti2 = k; } else if (d < best3) { best3 = d; besti3 = k; } } dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; idx[0] = besti1 + known_batch_start_idx; idx[1] = besti2 + known_batch_start_idx; idx[2] = besti3 + known_batch_start_idx; } void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, float *dist2, int *idx) { // unknown: (N1 + N2 ..., 3) // unknown_batch_cnt: (batch_size), [N1, N2, ...] // known: (M1 + M2 ..., 3) // known_batch_cnt: (batch_size), [M1, M2, ...] 
// Return: // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors // idx: (N1 + N2 ..., 3) index of the three nearest neighbors hipError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( three_nn_kernel_stack), dim3(blocks), dim3(threads), 0, 0, batch_size, N, M, unknown, unknown_batch_cnt, known, known_batch_cnt, dist2, idx ); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void three_interpolate_kernel_stack(int N, int channels, const float *features, const int *idx, const float *weight, float *out) { // features: (M1 + M2 ..., C) // idx: [N1 + N2 ..., 3] // weight: [N1 + N2 ..., 3] // Return: // out: (N1 + N2 ..., C) int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N || c_idx >= channels) return; weight += pt_idx * 3; idx += pt_idx * 3; out += pt_idx * channels + c_idx; out[0] = weight[0] * features[idx[0] * channels + c_idx] + weight[1] * features[idx[1] * channels + c_idx] + weight[2] * features[idx[2] * channels + c_idx]; } void three_interpolate_kernel_launcher_stack(int N, int channels, const float *features, const int *idx, const float *weight, float *out) { // features: (M1 + M2 ..., C) // idx: [N1 + N2 ..., 3] // weight: [N1 + N2 ..., 3] // Return: // out: (N1 + N2 ..., C) hipError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( three_interpolate_kernel_stack), dim3(blocks), dim3(threads), 0, 0, N, channels, features, idx, weight, out); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void three_interpolate_grad_kernel_stack(int N, int channels, const float *grad_out, const int *idx, const float *weight, float *grad_features) { // grad_out_tensor: (N1 + N2 ..., C) // idx_tensor: [N1 + N2 ..., 3] // weight_tensor: [N1 + N2 ..., 3] // Return: // grad_features_tensor: (M1 + M2 ..., C) int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N || c_idx >= channels) return; grad_out += pt_idx * channels + c_idx; weight += pt_idx * 3; idx += pt_idx * 3; // printf("pt_idx=%d, c_idx=%d, idx=(%d, %d, %d), grad_out=%f\n", pt_idx, c_idx, idx[0], idx[1], idx[2], grad_out[0]); atomicAdd(grad_features + idx[0] * channels + c_idx, grad_out[0] * weight[0]); atomicAdd(grad_features + idx[1] * channels + c_idx, grad_out[0] * weight[1]); atomicAdd(grad_features + idx[2] * channels + c_idx, grad_out[0] * weight[2]); } void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, const int *idx, const float *weight, float *grad_features) { // grad_out_tensor: (N1 + N2 ..., C) // idx_tensor: [N1 + N2 ..., 3] // weight_tensor: [N1 + N2 ..., 3] // Return: // grad_features_tensor: (M1 + M2 ..., C) hipError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( three_interpolate_grad_kernel_stack), dim3(blocks), dim3(threads), 0, 0, N, channels, grad_out, idx, weight, grad_features ); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
00d0310c7c8d166a5e573b5fe21275d54fb99296.cu
/* Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "interpolate_gpu.h" __global__ void three_nn_kernel_stack(int batch_size, int N, int M, const float *unknown, const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, float *dist2, int *idx) { // unknown: (N1 + N2 ..., 3) // unknown_batch_cnt: (batch_size), [N1, N2, ...] // known: (M1 + M2 ..., 3) // known_batch_cnt: (batch_size), [M1, M2, ...] // Return: // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors // idx: (N1 + N2 ..., 3) index of the three nearest neighbors int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N) return; int bs_idx = 0, pt_cnt = unknown_batch_cnt[0]; for (int k = 1; k < batch_size; k++){ if (pt_idx < pt_cnt) break; pt_cnt += unknown_batch_cnt[k]; bs_idx = k; } int cur_num_known_points = known_batch_cnt[bs_idx]; int known_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) known_batch_start_idx += known_batch_cnt[k]; known += known_batch_start_idx * 3; unknown += pt_idx * 3; dist2 += pt_idx * 3; idx += pt_idx * 3; float ux = unknown[0]; float uy = unknown[1]; float uz = unknown[2]; double best1 = 1e40, best2 = 1e40, best3 = 1e40; int besti1 = 0, besti2 = 0, besti3 = 0; for (int k = 0; k < cur_num_known_points; ++k) { float x = known[k * 3 + 0]; float y = known[k * 3 + 1]; float z = known[k * 3 + 2]; float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); if (d < best1) { best3 = best2; besti3 = besti2; best2 = best1; besti2 = besti1; best1 = d; besti1 = k; } else if (d < best2) { best3 = best2; besti3 = besti2; best2 = d; besti2 = k; } else if (d < best3) { best3 = d; besti3 = k; } } dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; idx[0] = besti1 + known_batch_start_idx; idx[1] = besti2 + known_batch_start_idx; idx[2] = besti3 + known_batch_start_idx; } void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, float *dist2, int *idx) { // unknown: (N1 + N2 ..., 3) // unknown_batch_cnt: (batch_size), [N1, N2, ...] // known: (M1 + M2 ..., 3) // known_batch_cnt: (batch_size), [M1, M2, ...] 
// Return: // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors // idx: (N1 + N2 ..., 3) index of the three nearest neighbors cudaError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); three_nn_kernel_stack<<<blocks, threads>>>( batch_size, N, M, unknown, unknown_batch_cnt, known, known_batch_cnt, dist2, idx ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void three_interpolate_kernel_stack(int N, int channels, const float *features, const int *idx, const float *weight, float *out) { // features: (M1 + M2 ..., C) // idx: [N1 + N2 ..., 3] // weight: [N1 + N2 ..., 3] // Return: // out: (N1 + N2 ..., C) int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N || c_idx >= channels) return; weight += pt_idx * 3; idx += pt_idx * 3; out += pt_idx * channels + c_idx; out[0] = weight[0] * features[idx[0] * channels + c_idx] + weight[1] * features[idx[1] * channels + c_idx] + weight[2] * features[idx[2] * channels + c_idx]; } void three_interpolate_kernel_launcher_stack(int N, int channels, const float *features, const int *idx, const float *weight, float *out) { // features: (M1 + M2 ..., C) // idx: [N1 + N2 ..., 3] // weight: [N1 + N2 ..., 3] // Return: // out: (N1 + N2 ..., C) cudaError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); dim3 threads(THREADS_PER_BLOCK); three_interpolate_kernel_stack<<<blocks, threads>>>(N, channels, features, idx, weight, out); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void three_interpolate_grad_kernel_stack(int N, int channels, const float *grad_out, const int *idx, const float *weight, float *grad_features) { // grad_out_tensor: (N1 + N2 ..., C) // idx_tensor: [N1 + N2 ..., 3] // weight_tensor: [N1 + N2 ..., 3] // Return: // grad_features_tensor: (M1 + M2 ..., C) int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= N || c_idx >= channels) return; grad_out += pt_idx * channels + c_idx; weight += pt_idx * 3; idx += pt_idx * 3; // printf("pt_idx=%d, c_idx=%d, idx=(%d, %d, %d), grad_out=%f\n", pt_idx, c_idx, idx[0], idx[1], idx[2], grad_out[0]); atomicAdd(grad_features + idx[0] * channels + c_idx, grad_out[0] * weight[0]); atomicAdd(grad_features + idx[1] * channels + c_idx, grad_out[0] * weight[1]); atomicAdd(grad_features + idx[2] * channels + c_idx, grad_out[0] * weight[2]); } void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, const int *idx, const float *weight, float *grad_features) { // grad_out_tensor: (N1 + N2 ..., C) // idx_tensor: [N1 + N2 ..., 3] // weight_tensor: [N1 + N2 ..., 3] // Return: // grad_features_tensor: (M1 + M2 ..., C) cudaError_t err; dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); three_interpolate_grad_kernel_stack<<<blocks, threads>>>( N, channels, grad_out, idx, weight, grad_features ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
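three_interpolate_kernel_stack consumes per-point weights alongside the idx tensor produced by three_nn; in the usual PointNet++ pipeline those weights are the normalized inverse distances derived from dist2. A sketch of that intermediate step (this kernel is not part of the file above):

__global__ void three_nn_weights(int n, const float* dist2, float* weight)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // inverse-distance weights over the three neighbours, normalized to sum to 1
    float w0 = 1.0f / (dist2[i * 3 + 0] + 1e-8f);
    float w1 = 1.0f / (dist2[i * 3 + 1] + 1e-8f);
    float w2 = 1.0f / (dist2[i * 3 + 2] + 1e-8f);
    float s = w0 + w1 + w2;
    weight[i * 3 + 0] = w0 / s;
    weight[i * 3 + 1] = w1 / s;
    weight[i * 3 + 2] = w2 / s;
}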
8d4d79395b9f239ef1d037981adc498efaa2b291.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ void MakeCountSegment_sharedAtomic(float *segment, int *bins, const int seglength, int *segCounter, int *s_counter, const int countlength, const float low, const float high, const float slope)
{
    int bin;
    float temp;
    for (int jj=0; jj<seglength; jj++){
        temp = abs(segment[jj]);
        if ( ( temp > low ) & ( temp < high ) ) {
            bin = (int)ceil(slope*abs(high-temp));
        }
        else if (temp >= high) {
            bin = 0;
        }
        else bin = countlength - 1;
        bins[jj]=bin;
        atomicAdd(s_counter+bin,1);
    }
    for (int jj=0; jj<countlength; jj++) segCounter[jj]=s_counter[jj];
    return;
}

__device__ void MakeCountSegment(float *segment, int *bins, const int seglength, int *segCounter, const int countlength, const float low, const float high, const float slope)
{
    int bin;
    float temp;
    for (int jj=0; jj<seglength; jj++){
        temp = abs(segment[jj]);
        if ( ( temp > low ) & ( temp < high ) ) {
            bin = (int)ceil(slope*abs(high-temp));
        }
        else if (temp >= high) {
            bin = 0;
        }
        else bin = countlength - 1;
        bins[jj]=bin;
        segCounter[bin] = segCounter[bin] + 1;
    }
    return;
}

__global__ void make_and_count_seg_sharedAtomic(float *vec, int *bin, int *segcounter, const int length, const int countlength, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength, const float low, const float high, const float slope)
{
    int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int startIndex, SegmentLength, startCountIndex;
    extern __shared__ int s_counter[];
    startCountIndex = xIndex*countlength;
    if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
        startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
        SegmentLength = LowSegmentLength;
    }
    else {
        startIndex = xIndex*HighSegmentLength;
        SegmentLength = HighSegmentLength;
    }
    MakeCountSegment_sharedAtomic(vec+startIndex, bin+startIndex, SegmentLength, segcounter+startCountIndex, s_counter, countlength, low, high, slope);
}
8d4d79395b9f239ef1d037981adc498efaa2b291.cu
#include "includes.h" __device__ void MakeCountSegment_sharedAtomic(float *segment, int *bins, const int seglength, int *segCounter, int *s_counter, const int countlength, const float low, const float high, const float slope) { int bin; float temp; for (int jj=0; jj<seglength; jj++){ temp = abs(segment[jj]); if ( ( temp > low ) & ( temp < high ) ) { bin = (int)ceil(slope*abs(high-temp)); } else if (temp >= high) { bin = 0; } else bin = countlength - 1; bins[jj]=bin; atomicAdd(s_counter+bin,1); } for (int jj=0; jj<countlength; jj++) segCounter[jj]=s_counter[jj]; return; } __device__ void MakeCountSegment(float *segment, int *bins, const int seglength, int *segCounter, const int countlength, const float low, const float high, const float slope) { int bin; float temp; for (int jj=0; jj<seglength; jj++){ temp = abs(segment[jj]); if ( ( temp > low ) & ( temp < high ) ) { bin = (int)ceil(slope*abs(high-temp)); } else if (temp >= high) { bin = 0; } else bin = countlength - 1; bins[jj]=bin; segCounter[bin] = segCounter[bin] + 1; } return; } __global__ void make_and_count_seg_sharedAtomic(float *vec, int *bin, int *segcounter, const int length, const int countlength, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength, const float low, const float high, const float slope) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int startIndex, SegmentLength, startCountIndex; extern __shared__ int s_counter[]; startCountIndex = xIndex*countlength; if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){ startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength; SegmentLength = LowSegmentLength; } else { startIndex = xIndex*HighSegmentLength; SegmentLength = HighSegmentLength; } MakeCountSegment_sharedAtomic(vec+startIndex, bin+startIndex, SegmentLength, segcounter+startCountIndex, s_counter, countlength, low, high, slope); }
efb46f29e6b4a77aa923a2032e8c764b1f529e55.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; // Functions void CleanupResources(void); void RandomInit(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1 = 0; float Value2 = 0; float Value3; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access if((i%32)<=3){ for(unsigned k=0; k<iterations;k++) { Value1=I1+I2; Value3=I1-I2; Value1+=Value2; Value1+=Value2; Value2=Value3-Value1; Value1=Value2+Value3; } } __syncthreads(); Value=Value1; C[i]=Value+Value2; } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); 
getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
efb46f29e6b4a77aa923a2032e8c764b1f529e55.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; // Functions void CleanupResources(void); void RandomInit(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1 = 0; float Value2 = 0; float Value3; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access if((i%32)<=3){ for(unsigned k=0; k<iterations;k++) { Value1=I1+I2; Value3=I1-I2; Value1+=Value2; Value1+=Value2; Value2=Value3-Value1; Value1=Value2+Value3; } } __syncthreads(); Value=Value1; C[i]=Value+Value2; } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device 
memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
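One detail present in both versions of this file: RandomInit computes rand() / RAND_MAX with integer division, so nearly every element is initialized to 0 (only rand() == RAND_MAX yields 1). If uniform floats in [0, 1] are intended, the usual form is:

#include <cstdlib>

// Fills data with pseudo-random floats in [0, 1] using floating-point division.
void RandomInit(float* data, int n)
{
    for (int i = 0; i < n; ++i) {
        data[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
}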
66427f1bc3dc9f79b25a2fd6e1e295709ab8e975.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/SegmentReduce.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/cub.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros.h> #include <ATen/ops/cat.h> #include <ATen/ops/cumsum.h> #endif namespace at::native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomProd { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a * b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; Tensor _get_complete_sum(const Tensor& lengths) { int64_t segment_count = lengths.numel(); TORCH_CHECK(segment_count < INT_MAX); auto offsets = at::empty({segment_count + 1}, lengths.options()); offsets[0].zero_(); AT_DISPATCH_INDEX_TYPES( lengths.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); auto* offsets_data_ptr = offsets.mutable_data_ptr<index_t>(); at::cuda::cub::inclusive_sum( lengths_data_ptr, offsets_data_ptr + 1, segment_count); })); return offsets; } template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename index_t> __global__ void segment_reduce_forward_kernel( ReductionType reduction, scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, bool is_initial_set, scalar_t initial_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t 
offset_end = lengths_cumsum_data[offset_idx + 1]; // ===== step2: apply reduction for (index_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; const auto data = values_data[data_index]; // TODO: There is no need to branch with every element if (reduction == ReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == ReductionType::MIN) { initial_value = at::_isnan(data) ? data : std::min<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::PROD) { initial_value = initial_value * data; } } // ===== step3: finalize reduction int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; CUDA_KERNEL_ASSERT(lengths_data[lengths_idx] >= 0); if (lengths_data[lengths_idx] == 0 && !is_initial_set && reduction == ReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == ReductionType::MEAN && lengths_data[lengths_idx] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[lengths_idx]; } int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( ReductionType reduction, scalar_t* grad_input_data, const scalar_t* grad_data, const scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, scalar_t initial_prod_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; auto segment_length = lengths_data[lengths_idx]; if (segment_length == 0) { return; } int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; if (reduction == ReductionType::MAX || reduction == ReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == output_data[output_index]) { grad_input_data[data_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if 
(grad_input_data[data_index] > 0) { grad_input_data[data_index] = grad_input_data[data_index] / counter; } } } else if (reduction == ReductionType::MEAN) { auto grad_val = grad_data[output_index] / segment_length; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::PROD) { const auto& grad_val = grad_data[output_index] * output_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == 0) { // explicitly compute exclusive prod scalar_t exclusive_prod = initial_prod_value; int64_t prod_idx; for (int64_t k = offset_start; k < offset_end; ++k) { if (k != j) { prod_idx = outer_idx * data_stride_axis * data_size_axis + k * data_stride_axis + lane_id; exclusive_prod *= values_data[prod_idx]; } } grad_input_data[data_index] = grad_data[output_index] * exclusive_prod; } else { grad_input_data[data_index] = grad_val / values_data[data_index]; } } } } } // namespace Tensor _segment_reduce_lengths_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_or_offsets_contig, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { axis = lengths_or_offsets_contig.dim() - 1; int64_t segment_count = is_offsets_like ? 
lengths_or_offsets_contig.size(axis) - 1 : lengths_or_offsets_contig.size(axis); int64_t lengths_stride_axis = lengths_or_offsets_contig.stride(axis); auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); auto offsets = lengths_or_offsets_contig; auto lengths = lengths_or_offsets_contig; if (is_offsets_like) { lengths = lengths.diff(); } else { // _get_complete_sum only supports 1D auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output_contig.size(d); } for (int64_t d = axis + 1; d < output_contig.dim(); d++) { inner_offset *= output_contig.size(d); } constexpr int threads_per_block = 256; int64_t num_blocks = (outer_offset * inner_offset * segment_count + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); auto data_stride_axis = data_contig.stride(axis); auto data_size_axis = data_contig.size(axis); auto output_stride_axis = output_contig.stride(axis); auto output_size_axis = output_contig.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets_contig.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { const auto* lengths_data = lengths.const_data_ptr<index_t>(); auto* offsets_data = offsets.const_data_ptr<index_t>(); // TODO: Switch to TensorIterator for better maintainablility and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.const_data_ptr<scalar_t>(); auto* grad_data = grad_contig.const_data_ptr<scalar_t>(); auto* grad_input_data = grad_input.mutable_data_ptr<scalar_t>(); const auto* values_data = data_contig.const_data_ptr<scalar_t>(); scalar_t initial_prod_value; if (initial.has_value()) { initial_prod_value = initial.value().to<scalar_t>(); } else { initial_prod_value = 1; } hipLaunchKernelGGL(( segment_reduce_backward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, lengths_stride_axis, initial_prod_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_lengths_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, lengths_contig, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& offsets_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, offsets_contig, axis, initial, 
/*is_offsets_like=*/true); } Tensor _segment_reduce_lengths_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths_or_offsets, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { // data and lengths_or_offsets should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous()); TORCH_CHECK(lengths_or_offsets.is_contiguous()); axis = lengths_or_offsets.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets.size(axis) - 1 : lengths_or_offsets.size(axis); int64_t lengths_stride_axis = lengths_or_offsets.stride(axis); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); auto offsets = lengths_or_offsets; auto lengths = lengths_or_offsets; if (is_offsets_like) { lengths = lengths.diff(); } else { // _get_complete_sum only supports 1D auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output.size(d); } for (int64_t d = axis + 1; d < output.dim(); d++) { inner_offset *= output.size(d); } constexpr int threads_per_block = 256; // segment_count * stride_count is just output.numel() ? int64_t num_blocks = (output.numel() + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); auto data_stride_axis = data.stride(axis); auto data_size_axis = data.size(axis); auto output_stride_axis = output.stride(axis); auto output_size_axis = output.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets.scalar_type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.const_data_ptr<index_t>(); auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.const_data_ptr<scalar_t>(); auto* output_data_ptr = output.mutable_data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == ReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = 0; } else if (reduction == ReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } else if (reduction == ReductionType::PROD) { initial_value = 1; } if (output_shape.size() > 1) { hipLaunchKernelGGL(( segment_reduce_forward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, lengths_stride_axis, initial.has_value(), initial_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { if (reduction == ReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, 
offsets_data_ptr + 1, max_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); hipLaunchKernelGGL(( post_sum_div_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (reduction == ReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::PROD) { CustomProd prod_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, prod_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } } }); })); return output; } Tensor _segment_reduce_lengths_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, lengths, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& offsets, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, offsets, axis, initial, /*is_offsets_like=*/true); } REGISTER_DISPATCH(_segment_reduce_lengths_stub, &_segment_reduce_lengths_cuda_kernel); REGISTER_DISPATCH(_segment_reduce_offsets_stub, &_segment_reduce_offsets_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_lengths_backward_stub, &_segment_reduce_lengths_backward_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_offsets_backward_stub, &_segment_reduce_offsets_backward_cuda_kernel); } // namespace at::native
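// A minimal host-side sketch (not from the original sources) of the lengths-to-offsets
// conversion the host code above performs before launching the kernels: segment i spans
// [offsets[i], offsets[i+1]) along the reduced axis, and when the caller passes lengths
// the code prepends a zero slice and runs a cumulative sum along that axis. The function
// name and the use of std::partial_sum here are illustrative, assuming 1-D int64 lengths.
#include <cstdint>
#include <numeric>
#include <vector>

std::vector<int64_t> lengths_to_offsets(const std::vector<int64_t>& lengths) {
  // lengths {2, 3, 0, 1} -> offsets {0, 2, 5, 5, 6}
  std::vector<int64_t> offsets(lengths.size() + 1, 0);
  std::partial_sum(lengths.begin(), lengths.end(), offsets.begin() + 1);
  return offsets;
}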
66427f1bc3dc9f79b25a2fd6e1e295709ab8e975.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/SegmentReduce.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/cub.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros.h> #include <ATen/ops/cat.h> #include <ATen/ops/cumsum.h> #endif namespace at::native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomProd { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a * b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; Tensor _get_complete_sum(const Tensor& lengths) { int64_t segment_count = lengths.numel(); TORCH_CHECK(segment_count < INT_MAX); auto offsets = at::empty({segment_count + 1}, lengths.options()); offsets[0].zero_(); AT_DISPATCH_INDEX_TYPES( lengths.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); auto* offsets_data_ptr = offsets.mutable_data_ptr<index_t>(); at::cuda::cub::inclusive_sum( lengths_data_ptr, offsets_data_ptr + 1, segment_count); })); return offsets; } template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename index_t> __global__ void segment_reduce_forward_kernel( ReductionType reduction, scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, bool is_initial_set, scalar_t initial_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; // ===== step2: apply reduction 
for (index_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; const auto data = values_data[data_index]; // TODO: There is no need to branch with every element if (reduction == ReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == ReductionType::MIN) { initial_value = at::_isnan(data) ? data : std::min<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::PROD) { initial_value = initial_value * data; } } // ===== step3: finalize reduction int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; CUDA_KERNEL_ASSERT(lengths_data[lengths_idx] >= 0); if (lengths_data[lengths_idx] == 0 && !is_initial_set && reduction == ReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == ReductionType::MEAN && lengths_data[lengths_idx] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[lengths_idx]; } int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( ReductionType reduction, scalar_t* grad_input_data, const scalar_t* grad_data, const scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, scalar_t initial_prod_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; auto segment_length = lengths_data[lengths_idx]; if (segment_length == 0) { return; } int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; if (reduction == ReductionType::MAX || reduction == ReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == output_data[output_index]) { grad_input_data[data_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (grad_input_data[data_index] > 0) { grad_input_data[data_index] = grad_input_data[data_index] / 
counter; } } } else if (reduction == ReductionType::MEAN) { auto grad_val = grad_data[output_index] / segment_length; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::PROD) { const auto& grad_val = grad_data[output_index] * output_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == 0) { // explicitly compute exclusive prod scalar_t exclusive_prod = initial_prod_value; int64_t prod_idx; for (int64_t k = offset_start; k < offset_end; ++k) { if (k != j) { prod_idx = outer_idx * data_stride_axis * data_size_axis + k * data_stride_axis + lane_id; exclusive_prod *= values_data[prod_idx]; } } grad_input_data[data_index] = grad_data[output_index] * exclusive_prod; } else { grad_input_data[data_index] = grad_val / values_data[data_index]; } } } } } // namespace Tensor _segment_reduce_lengths_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_or_offsets_contig, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { axis = lengths_or_offsets_contig.dim() - 1; int64_t segment_count = is_offsets_like ? 
lengths_or_offsets_contig.size(axis) - 1 : lengths_or_offsets_contig.size(axis); int64_t lengths_stride_axis = lengths_or_offsets_contig.stride(axis); auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); auto offsets = lengths_or_offsets_contig; auto lengths = lengths_or_offsets_contig; if (is_offsets_like) { lengths = lengths.diff(); } else { // _get_complete_sum only supports 1D auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output_contig.size(d); } for (int64_t d = axis + 1; d < output_contig.dim(); d++) { inner_offset *= output_contig.size(d); } constexpr int threads_per_block = 256; int64_t num_blocks = (outer_offset * inner_offset * segment_count + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); auto data_stride_axis = data_contig.stride(axis); auto data_size_axis = data_contig.size(axis); auto output_stride_axis = output_contig.stride(axis); auto output_size_axis = output_contig.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets_contig.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { const auto* lengths_data = lengths.const_data_ptr<index_t>(); auto* offsets_data = offsets.const_data_ptr<index_t>(); // TODO: Switch to TensorIterator for better maintainablility and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.const_data_ptr<scalar_t>(); auto* grad_data = grad_contig.const_data_ptr<scalar_t>(); auto* grad_input_data = grad_input.mutable_data_ptr<scalar_t>(); const auto* values_data = data_contig.const_data_ptr<scalar_t>(); scalar_t initial_prod_value; if (initial.has_value()) { initial_prod_value = initial.value().to<scalar_t>(); } else { initial_prod_value = 1; } segment_reduce_backward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, lengths_stride_axis, initial_prod_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_lengths_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, lengths_contig, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& offsets_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, offsets_contig, axis, initial, /*is_offsets_like=*/true); } Tensor 
_segment_reduce_lengths_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths_or_offsets, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { // data and lengths_or_offsets should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous()); TORCH_CHECK(lengths_or_offsets.is_contiguous()); axis = lengths_or_offsets.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets.size(axis) - 1 : lengths_or_offsets.size(axis); int64_t lengths_stride_axis = lengths_or_offsets.stride(axis); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); auto offsets = lengths_or_offsets; auto lengths = lengths_or_offsets; if (is_offsets_like) { lengths = lengths.diff(); } else { // _get_complete_sum only supports 1D auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output.size(d); } for (int64_t d = axis + 1; d < output.dim(); d++) { inner_offset *= output.size(d); } constexpr int threads_per_block = 256; // segment_count * stride_count is just output.numel() ? int64_t num_blocks = (output.numel() + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); auto data_stride_axis = data.stride(axis); auto data_size_axis = data.size(axis); auto output_stride_axis = output.stride(axis); auto output_size_axis = output.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets.scalar_type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.const_data_ptr<index_t>(); auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.const_data_ptr<scalar_t>(); auto* output_data_ptr = output.mutable_data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == ReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = 0; } else if (reduction == ReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } else if (reduction == ReductionType::PROD) { initial_value = 1; } if (output_shape.size() > 1) { segment_reduce_forward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, lengths_stride_axis, initial.has_value(), initial_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { if (reduction == ReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, max_op, initial_value, at::cuda::getCurrentCUDAStream()); } 
else if (reduction == ReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); post_sum_div_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (reduction == ReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == ReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == ReductionType::PROD) { CustomProd prod_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, prod_op, initial_value, at::cuda::getCurrentCUDAStream()); } } }); })); return output; } Tensor _segment_reduce_lengths_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, lengths, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& offsets, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, offsets, axis, initial, /*is_offsets_like=*/true); } REGISTER_DISPATCH(_segment_reduce_lengths_stub, &_segment_reduce_lengths_cuda_kernel); REGISTER_DISPATCH(_segment_reduce_offsets_stub, &_segment_reduce_offsets_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_lengths_backward_stub, &_segment_reduce_lengths_backward_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_offsets_backward_stub, &_segment_reduce_offsets_backward_cuda_kernel); } // namespace at::native
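// A minimal sketch (not from the original sources) of the two-phase
// cub::DeviceSegmentedReduce::Reduce call that the 1-D fast path above relies on; the
// CUB_WRAPPER macro is assumed to hide this query-allocate-run pattern (its definition
// is not part of this file). Segment i reduces the input range
// [d_offsets[i], d_offsets[i+1]), which is why the code passes offsets_data_ptr and
// offsets_data_ptr + 1 as the begin/end offset iterators.
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>

template <typename T, typename ReductionOp>
cudaError_t segmented_reduce(const T* d_in, T* d_out, int num_segments,
                             const int64_t* d_offsets, ReductionOp op,
                             T initial_value, cudaStream_t stream) {
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  // First call only computes the required temporary-storage size.
  cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
                                     d_in, d_out, num_segments,
                                     d_offsets, d_offsets + 1,
                                     op, initial_value, stream);
  cudaError_t err = cudaMalloc(&d_temp_storage, temp_storage_bytes);
  if (err != cudaSuccess) return err;
  // Second call performs the segmented reduction.
  err = cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
                                           d_in, d_out, num_segments,
                                           d_offsets, d_offsets + 1,
                                           op, initial_value, stream);
  cudaFree(d_temp_storage);
  return err;
}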
weems_emmart_texture_memory.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.hu" #include <hip/hip_cooperative_groups.h> #define PI 3.141592654f texture<DATA_TYPE, hipTextureType2D, hipReadModeElementType> texRef; __device__ __forceinline__ DATA_TYPE mul(DATA_TYPE a, DATA_TYPE b) { return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x); } __device__ __forceinline__ DATA_TYPE twiddle(DATA_TYPE a, size_t n, size_t block, size_t row) { // todo: static float f = (-2 * PI * block * row) / n; return mul(a, make_float2(cosf(f), sinf(f))); } __device__ __forceinline__ void sFFT(DATA_TYPE out[], DATA_TYPE a[], size_t n) { for (int i = 0; i < n; i++) { float real = 0.0; float imag = 0.0; float pow = 2 * PI * i / (float)n; for (int j = 0; j < n; j++) { float powh = fmodf(j * pow, 2 * PI); DATA_TYPE d = a[j]; real += d.x * cosf(powh) + d.y * sinf(powh); imag += - d.x * sinf(powh) + d.y * cosf(powh); } out[i] = make_float2(real, imag); } } __global__ void kernel1_2(DATA_TYPE *output, DATA_TYPE *data, int kernel_id) { DATA_TYPE sample[8]; DATA_TYPE out[8]; __shared__ DATA_TYPE block[512]; for (int i = 0; i < 8; i++) sample[i] = data[(blockIdx.x + threadIdx.y * 128 + i * 128 * 8) * 8 + threadIdx.x]; // 1. 8-point fft sFFT(out, sample, 8); // 2. transpose through shared memory { for (int i = 0; i < 8; i++) block[(threadIdx.y * blockDim.x + i) * 8 + threadIdx.x] = out[i]; __syncthreads(); for (int i = 0; i < 8; i++) sample[i] = block[i * blockDim.x * 8 + threadIdx.y * 8 + threadIdx.x]; } // 3. twiddle for (int i = 0; i < 8; i++) sample[i] = twiddle(sample[i], 64, i, threadIdx.y); // 4. 8-point fft sFFT(out, sample, 8); if (kernel_id == 1) { for (int i = 0; i < 8; i++) { size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8); size_t row = id / 2; size_t rem = blockIdx.x % 2; output[(((row % 64) * 64 + row / 64) * 2 + rem) * 8 + threadIdx.x] = twiddle(out[i], 64 * 64, row % 64, (row / 64) % 64); } } else { for (int i = 0; i < 8; i++) { size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8); size_t row = id / 2; size_t rem = blockIdx.x % 2; output[id * 8 + threadIdx.x] = mul(out[i], tex2D(texRef, (rem * 8 + threadIdx.x) / (float)16, row / (float) 4096 )); } } } __global__ void kernel3(DATA_TYPE *output, DATA_TYPE *data) { size_t pos = (blockIdx.x * blockDim.x + threadIdx.x) * 16; DATA_TYPE sample[16]; DATA_TYPE out[16]; for (int i = 0; i < 16; i++) sample[i] = data[pos + i]; // 1. 
16-point fft sFFT(out, sample, 16); for (int i = 0; i < 16; i++) { output[pos + i] = out[i]; } } void fft(DATA_TYPE *output, DATA_TYPE *data) { dim3 blockDim1(8, 8, 1); dim3 blockDim3(32, 1, 1); dim3 gridDim(128); hipLaunchKernelGGL(( kernel1_2), dim3(gridDim), dim3(blockDim1), 0, 0, output, data, 1); hipLaunchKernelGGL(( kernel1_2), dim3(gridDim), dim3(blockDim1), 0, 0, data, output, 2); hipLaunchKernelGGL(( kernel3), dim3(gridDim), dim3(blockDim3), 0, 0, output, data); } std::vector<float> benchmark(DATA_TYPE *output, DATA_TYPE *data, hipEvent_t start, hipEvent_t stop) { DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle, *middle2; std::vector<float> time(2); /* Setup */ cudaCheckReturn(hipHostMalloc(&middle, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(hipHostMalloc(&middle2, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(hipMalloc(&dev_data, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(hipMalloc(&dev_middle, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(hipMalloc(&dev_output, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(hipMemcpy(dev_data, data, DATA_SIZE * sizeof(DATA_TYPE), hipMemcpyHostToDevice)); // Texture Momory DATA_TYPE *twiddle_65536; hipArray *dev_twiddle_65536; hipHostMalloc(&twiddle_65536, 65536 * sizeof(DATA_TYPE)); for (int i = 0; i < 16; i++) { for (int j = 0; j < 4096; j++) { float f = (-2 * PI * i * j) / 65536; twiddle_65536[i * 4096 + j] = make_float2(cosf(f), sinf(f)); } } hipChannelFormatDesc channelDesc = hipCreateChannelDesc<DATA_TYPE>(); cudaCheckReturn(hipMallocArray(&dev_twiddle_65536, &channelDesc, 4096, 16)); cudaCheckReturn(hipMemcpyToArray( dev_twiddle_65536, 0, 0, twiddle_65536, 65536 * sizeof(DATA_SIZE), hipMemcpyHostToDevice)); texRef.normalized = true; texRef.filterMode = hipFilterModePoint; texRef.addressMode[0] = hipAddressModeWrap; texRef.addressMode[1] = hipAddressModeWrap; cudaCheckReturn(hipBindTextureToArray(texRef, dev_twiddle_65536, channelDesc)); // cuFFT hipfftHandle plan; cufftCheckReturn(hipfftCreate(&plan)); long long len = DATA_SIZE; size_t ws = 0; cufftCheckReturn( cufftXtMakePlanMany( plan, 1, &len, NULL, 1, 1, HIP_C_32F, NULL, 1, 1, HIP_C_32F, 1, &ws, HIP_C_32F)); /* FFT */ cudaCheckReturn(hipDeviceSynchronize()); cudaCheckReturn(hipEventRecord(start)); // cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, HIPFFT_FORWARD)); fft(dev_middle, dev_data); cudaCheckReturn(hipEventRecord(stop)); cudaCheckReturn(hipEventSynchronize(stop)); cudaCheckKernel(); cudaCheckReturn(hipEventElapsedTime(&time[0], start, stop)); /* Scaling */ cudaCheckReturn(hipMemcpy(middle, dev_middle, DATA_SIZE * sizeof(DATA_TYPE), hipMemcpyDeviceToHost)); for (size_t i = 0; i < DATA_SIZE; i++) { float2 m = middle[i]; m.x /= DATA_SIZE; m.y /= DATA_SIZE; middle2[i] = m; } for (size_t i = 0; i < 4096; i++) { for (size_t j = 0; j < 16; j++) { middle[j * 4096 + i] = middle2[i * 16 + j]; } } cudaCheckReturn(hipMemcpy(dev_middle, middle, DATA_SIZE * sizeof(DATA_TYPE), hipMemcpyHostToDevice)); /* IFFT */ cudaCheckReturn(hipDeviceSynchronize()); cudaCheckReturn(hipEventRecord(start)); cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, HIPFFT_BACKWARD)); cudaCheckReturn(hipEventRecord(stop)); cudaCheckReturn(hipEventSynchronize(stop)); cudaCheckKernel(); cudaCheckReturn(hipEventElapsedTime(&time[1], start, stop)); /* Close */ cufftCheckReturn(hipfftDestroy(plan)); cudaCheckReturn(hipMemcpy(output, dev_output, DATA_SIZE * sizeof(DATA_TYPE), hipMemcpyDeviceToHost)); cudaCheckReturn(hipHostFree(middle)); cudaCheckReturn(hipFree(dev_output)); 
    cudaCheckReturn(hipFree(dev_middle));
    cudaCheckReturn(hipFree(dev_data));

    return time;
}
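// A host-side reference (not from the original sources) for the naive DFT that the sFFT
// device helper above evaluates: out[k] = sum_j a[j] * exp(-2*pi*i*j*k/n). The real/imag
// accumulation in sFFT is the expanded form of a[j] * e^{-i*theta}, and the twiddle()
// helper multiplies by e^{-2*pi*i*block*row/n} between stages. std::complex<float> stands
// in for float2 here; the function name is illustrative.
#include <cmath>
#include <complex>
#include <vector>

std::vector<std::complex<float>> dft_reference(const std::vector<std::complex<float>>& a) {
  const float kPi = 3.141592654f;
  const size_t n = a.size();
  std::vector<std::complex<float>> out(n);
  for (size_t k = 0; k < n; ++k) {
    std::complex<float> acc(0.0f, 0.0f);
    for (size_t j = 0; j < n; ++j) {
      const float theta = 2.0f * kPi * static_cast<float>(j * k) / static_cast<float>(n);
      acc += a[j] * std::complex<float>(std::cos(theta), -std::sin(theta));
    }
    out[k] = acc;
  }
  return out;
}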
weems_emmart_texture_memory.cu
#include "common.hu" #include <cooperative_groups.h> #define PI 3.141592654f texture<DATA_TYPE, cudaTextureType2D, cudaReadModeElementType> texRef; __device__ __forceinline__ DATA_TYPE mul(DATA_TYPE a, DATA_TYPE b) { return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x); } __device__ __forceinline__ DATA_TYPE twiddle(DATA_TYPE a, size_t n, size_t block, size_t row) { // todo: static float f = (-2 * PI * block * row) / n; return mul(a, make_float2(cosf(f), sinf(f))); } __device__ __forceinline__ void sFFT(DATA_TYPE out[], DATA_TYPE a[], size_t n) { for (int i = 0; i < n; i++) { float real = 0.0; float imag = 0.0; float pow = 2 * PI * i / (float)n; for (int j = 0; j < n; j++) { float powh = fmodf(j * pow, 2 * PI); DATA_TYPE d = a[j]; real += d.x * cosf(powh) + d.y * sinf(powh); imag += - d.x * sinf(powh) + d.y * cosf(powh); } out[i] = make_float2(real, imag); } } __global__ void kernel1_2(DATA_TYPE *output, DATA_TYPE *data, int kernel_id) { DATA_TYPE sample[8]; DATA_TYPE out[8]; __shared__ DATA_TYPE block[512]; for (int i = 0; i < 8; i++) sample[i] = data[(blockIdx.x + threadIdx.y * 128 + i * 128 * 8) * 8 + threadIdx.x]; // 1. 8-point fft sFFT(out, sample, 8); // 2. transpose through shared memory { for (int i = 0; i < 8; i++) block[(threadIdx.y * blockDim.x + i) * 8 + threadIdx.x] = out[i]; __syncthreads(); for (int i = 0; i < 8; i++) sample[i] = block[i * blockDim.x * 8 + threadIdx.y * 8 + threadIdx.x]; } // 3. twiddle for (int i = 0; i < 8; i++) sample[i] = twiddle(sample[i], 64, i, threadIdx.y); // 4. 8-point fft sFFT(out, sample, 8); if (kernel_id == 1) { for (int i = 0; i < 8; i++) { size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8); size_t row = id / 2; size_t rem = blockIdx.x % 2; output[(((row % 64) * 64 + row / 64) * 2 + rem) * 8 + threadIdx.x] = twiddle(out[i], 64 * 64, row % 64, (row / 64) % 64); } } else { for (int i = 0; i < 8; i++) { size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8); size_t row = id / 2; size_t rem = blockIdx.x % 2; output[id * 8 + threadIdx.x] = mul(out[i], tex2D(texRef, (rem * 8 + threadIdx.x) / (float)16, row / (float) 4096 )); } } } __global__ void kernel3(DATA_TYPE *output, DATA_TYPE *data) { size_t pos = (blockIdx.x * blockDim.x + threadIdx.x) * 16; DATA_TYPE sample[16]; DATA_TYPE out[16]; for (int i = 0; i < 16; i++) sample[i] = data[pos + i]; // 1. 
16-point fft sFFT(out, sample, 16); for (int i = 0; i < 16; i++) { output[pos + i] = out[i]; } } void fft(DATA_TYPE *output, DATA_TYPE *data) { dim3 blockDim1(8, 8, 1); dim3 blockDim3(32, 1, 1); dim3 gridDim(128); kernel1_2<<<gridDim, blockDim1>>>(output, data, 1); kernel1_2<<<gridDim, blockDim1>>>(data, output, 2); kernel3<<<gridDim, blockDim3>>>(output, data); } std::vector<float> benchmark(DATA_TYPE *output, DATA_TYPE *data, cudaEvent_t start, cudaEvent_t stop) { DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle, *middle2; std::vector<float> time(2); /* Setup */ cudaCheckReturn(cudaMallocHost(&middle, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(cudaMallocHost(&middle2, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(cudaMalloc(&dev_data, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(cudaMalloc(&dev_middle, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(cudaMalloc(&dev_output, DATA_SIZE * sizeof(DATA_TYPE))); cudaCheckReturn(cudaMemcpy(dev_data, data, DATA_SIZE * sizeof(DATA_TYPE), cudaMemcpyHostToDevice)); // Texture Momory DATA_TYPE *twiddle_65536; cudaArray *dev_twiddle_65536; cudaMallocHost(&twiddle_65536, 65536 * sizeof(DATA_TYPE)); for (int i = 0; i < 16; i++) { for (int j = 0; j < 4096; j++) { float f = (-2 * PI * i * j) / 65536; twiddle_65536[i * 4096 + j] = make_float2(cosf(f), sinf(f)); } } cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<DATA_TYPE>(); cudaCheckReturn(cudaMallocArray(&dev_twiddle_65536, &channelDesc, 4096, 16)); cudaCheckReturn(cudaMemcpyToArray( dev_twiddle_65536, 0, 0, twiddle_65536, 65536 * sizeof(DATA_SIZE), cudaMemcpyHostToDevice)); texRef.normalized = true; texRef.filterMode = cudaFilterModePoint; texRef.addressMode[0] = cudaAddressModeWrap; texRef.addressMode[1] = cudaAddressModeWrap; cudaCheckReturn(cudaBindTextureToArray(texRef, dev_twiddle_65536, channelDesc)); // cuFFT cufftHandle plan; cufftCheckReturn(cufftCreate(&plan)); long long len = DATA_SIZE; size_t ws = 0; cufftCheckReturn( cufftXtMakePlanMany( plan, 1, &len, NULL, 1, 1, CUDA_C_32F, NULL, 1, 1, CUDA_C_32F, 1, &ws, CUDA_C_32F)); /* FFT */ cudaCheckReturn(cudaDeviceSynchronize()); cudaCheckReturn(cudaEventRecord(start)); // cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, CUFFT_FORWARD)); fft(dev_middle, dev_data); cudaCheckReturn(cudaEventRecord(stop)); cudaCheckReturn(cudaEventSynchronize(stop)); cudaCheckKernel(); cudaCheckReturn(cudaEventElapsedTime(&time[0], start, stop)); /* Scaling */ cudaCheckReturn(cudaMemcpy(middle, dev_middle, DATA_SIZE * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost)); for (size_t i = 0; i < DATA_SIZE; i++) { float2 m = middle[i]; m.x /= DATA_SIZE; m.y /= DATA_SIZE; middle2[i] = m; } for (size_t i = 0; i < 4096; i++) { for (size_t j = 0; j < 16; j++) { middle[j * 4096 + i] = middle2[i * 16 + j]; } } cudaCheckReturn(cudaMemcpy(dev_middle, middle, DATA_SIZE * sizeof(DATA_TYPE), cudaMemcpyHostToDevice)); /* IFFT */ cudaCheckReturn(cudaDeviceSynchronize()); cudaCheckReturn(cudaEventRecord(start)); cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, CUFFT_INVERSE)); cudaCheckReturn(cudaEventRecord(stop)); cudaCheckReturn(cudaEventSynchronize(stop)); cudaCheckKernel(); cudaCheckReturn(cudaEventElapsedTime(&time[1], start, stop)); /* Close */ cufftCheckReturn(cufftDestroy(plan)); cudaCheckReturn(cudaMemcpy(output, dev_output, DATA_SIZE * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost)); cudaCheckReturn(cudaFreeHost(middle)); cudaCheckReturn(cudaFree(dev_output)); cudaCheckReturn(cudaFree(dev_middle)); cudaCheckReturn(cudaFree(dev_data)); 
    return time;
}
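// The twiddle table above is read through the legacy texture-reference API
// (texture<...> plus cudaBindTextureToArray), which recent CUDA toolkits have removed.
// A minimal sketch (not from the original sources) of the equivalent texture-object
// setup, assuming the same cudaArray as in benchmark() and that DATA_TYPE is float2;
// the kernel would then take a cudaTextureObject_t parameter and read it with
// tex2D<float2>(tex, x, y).
#include <cuda_runtime.h>

cudaTextureObject_t make_twiddle_texture(cudaArray_t twiddle_array) {
  cudaResourceDesc resDesc{};
  resDesc.resType = cudaResourceTypeArray;
  resDesc.res.array.array = twiddle_array;

  cudaTextureDesc texDesc{};
  texDesc.addressMode[0] = cudaAddressModeWrap;   // matches texRef.addressMode[0]
  texDesc.addressMode[1] = cudaAddressModeWrap;   // matches texRef.addressMode[1]
  texDesc.filterMode = cudaFilterModePoint;       // matches texRef.filterMode
  texDesc.readMode = cudaReadModeElementType;
  texDesc.normalizedCoords = 1;                   // matches texRef.normalized = true

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
  return tex;
}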
ef2874478646f05217d7525636c8ba73186f9647.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 4 #define TC 16 #define C 192 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[4032]; __shared__ float kernel_shared[1728]; float pad_temp_shared_local[16]; float kernel_shared_local[3]; compute_local[(0)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 8; ++rc_outer) { for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { __syncthreads(); pad_temp_shared[((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)))] = (((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 1))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 2))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 3))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 4))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? 
data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 25))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 5))] = (((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 24))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 6))] = ((((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) && (1 <= ((int)blockIdx.x))) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 1))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 7))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[(((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 8))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 1))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 9))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 2))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 10))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 3))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 11))] = ((((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) && (((int)blockIdx.x) < 6)) ? 
data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 4))] : 0.000000e+00f); if (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + ((((int)threadIdx.y) * 11) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1728) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 72) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) % 24) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.y) * 11) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 1) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1727) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 71) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 1))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 1) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 2) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1726) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 70) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 2))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 2) % 3)))]; } } } } } if ((((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + ((((int)threadIdx.y) * 11) / 3)) < 575) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1725) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 69) { if ((((((int)blockIdx.z) * 24) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24)) + 
((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 3))] = kernel[((((((((((int)blockIdx.z) * 41472) + (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) % 24) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.y) * 11) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 4) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1724) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 68) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 4))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 1) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 5) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 5) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1723) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 67) { if (((int)threadIdx.x) < 1) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 5))] = kernel[((((((((((int)blockIdx.z) * 41472) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + (((int)threadIdx.x) * 18)) + ((((((int)threadIdx.y) * 11) + 5) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 2) % 3)))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 24; ++rc_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 42))]; pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 84))]; pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 126))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 1))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 43))]; pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 85))]; pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 127))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 2))]; pad_temp_shared_local[(6)] = 
pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 44))]; pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 86))]; pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 128))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 3))]; pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 45))]; pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 87))]; pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 129))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)))]; kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)) + 1))]; kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)) + 2))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * 
kernel_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(2)])); } } } compute[((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 196))] = compute_local[(2)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 392))] = compute_local[(4)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 588))] = compute_local[(6)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 197))] = compute_local[(3)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 393))] = compute_local[(5)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 589))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); 
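// Allocate the workspace cuDNN requested above for the implicit-GEMM forward algorithm.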
hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; 
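// Device buffers for the convolution output and the filter weights (FFT path).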
float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma 
unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) 
+ 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * 
WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; hipEvent_t event_start; hipEvent_t event_stop; hipEventCreate(&event_start); hipEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost); 
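// The next two results are overwritten; the calls effectively warm up the FFT and Winograd paths before the timed runs below.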
  out_cudnn = convFFT.forward(device_input);
  out_cudnn = convWinogradeNon.forward(device_input);
  float *device_K;
  float *device_out;
  hipMalloc(&device_out, H*W*N*sizeof(float));
  hipMemset(device_out, 0, H*W*N*sizeof(float));
  hipMalloc(&device_K, C*N*9*sizeof(float));
  hipMemcpy(device_K, K, C*N*9*sizeof(float), hipMemcpyHostToDevice);
  // Time cuDNN implicit GEMM.
  hipEventRecord(event_start);
  convGemm.forward(device_input);
  hipEventRecord(event_stop);
  hipEventSynchronize(event_stop);
  float cudnnGemmTime;
  hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
  // Time cuDNN Winograd (non-fused).
  hipEventRecord(event_start);
  convWinogradeNon.forward(device_input);
  hipEventRecord(event_stop);
  hipEventSynchronize(event_stop);
  float cudnnWinogradeTimeNon;
  hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
  // Time cuDNN FFT.
  hipEventRecord(event_start);
  convFFT.forward(device_input);
  hipEventRecord(event_stop);
  hipEventSynchronize(event_stop);
  float cudnnFFTTime;
  hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
  // Time the generated direct-convolution kernel (default_function_kernel0).
  dim3 grid(7, 1, 4);
  dim3 block(2, 7, 24);
  hipEventRecord(event_start);
  hipLaunchKernelGGL((default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
  hipEventRecord(event_stop);
  hipEventSynchronize(event_stop);
  float time_tvm;
  hipEventElapsedTime(&time_tvm, event_start, event_stop);
  float *out_tvm = new float[N*H*W];
  hipMemcpy(out_tvm, device_out, N*H*W*sizeof(float), hipMemcpyDeviceToHost);
  hipMemset(device_out, 0, sizeof(float)*N*H*W);
  // Time the tiled shared-memory kernel (conv2d); it accumulates with atomicAdd, so the output buffer is cleared first.
  chkerr(hipFuncSetAttribute(conv2d, hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
  hipEventRecord(event_start);
  hipLaunchKernelGGL((conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
  hipEventRecord(event_stop);
  hipEventSynchronize(event_stop);
  float time_tdc;
  hipEventElapsedTime(&time_tdc, event_start, event_stop);
  float *out_tdc = new float[N*H*W];
  hipMemcpy(out_tdc, device_out, N*H*W*sizeof(float), hipMemcpyDeviceToHost);
  // Append the timings and speed-ups to the evaluation CSV and echo them to stdout.
  ofstream outfile;
  char buffer[1000];
  int ret = sprintf(buffer, "%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", N, C, H, W,
                    cudnnFFTTime, cudnnWinogradeTimeNon, cudnnGemmTime, time_tvm, time_tdc,
                    cudnnFFTTime/time_tdc, cudnnWinogradeTimeNon/time_tdc, cudnnGemmTime/time_tdc, time_tvm/time_tdc);
  outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
  outfile << buffer;
  // Element-wise difference between the cuDNN reference and the tiled kernel (computed as a sanity check).
  float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
  cout << N << "," << C << "," << H << "," << W << "," << cudnnFFTTime << "," << cudnnWinogradeTimeNon << "," << cudnnGemmTime << ","
       << time_tvm << "," << time_tdc << "," << cudnnFFTTime/time_tdc << "," << cudnnWinogradeTimeNon/time_tdc << ","
       << cudnnGemmTime/time_tdc << "," << time_tvm/time_tdc << endl;
  return 0;
}
ef2874478646f05217d7525636c8ba73186f9647.cu
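// CUDA counterpart of the HIP file above. It benchmarks two custom kernels
// (the generated direct-convolution kernel default_function_kernel0 and the
// tiled shared-memory kernel conv2d) against cuDNN's implicit-GEMM, Winograd
// (non-fused), and FFT algorithms for a 3x3 convolution with C=192 input
// channels, N=96 output channels, and a 28x28 image, appending the measured
// times and speed-ups to a CSV file.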
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 4 #define TC 16 #define C 192 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[4032]; __shared__ float kernel_shared[1728]; float pad_temp_shared_local[16]; float kernel_shared_local[3]; compute_local[(0)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 8; ++rc_outer) { for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { __syncthreads(); pad_temp_shared[((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)))] = (((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 1))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 2))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 3))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 4))] = ((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) ? 
data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 25))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 5))] = (((1 <= (((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 24))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 6))] = ((((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) && (1 <= ((int)blockIdx.x))) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) - 1))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 7))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[(((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 8))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 1))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 9))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 2))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 10))] = (((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) ? data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 3))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 168) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 12)) + 11))] = ((((((((int)threadIdx.y) * 4) + (((int)threadIdx.x) * 2)) + ry_outer) < 28) && (((int)blockIdx.x) < 6)) ? 
data[((((((((rc_outer * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 112)) + (((int)threadIdx.x) * 56)) + (ry_outer * 28)) + (((int)blockIdx.x) * 4)) + 4))] : 0.000000e+00f); if (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + ((((int)threadIdx.y) * 11) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1728) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 72) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) % 24) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.y) * 11) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 1) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1727) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 71) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 1))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 1) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 1) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 2) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1726) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 70) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 2))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 2) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 2) % 3)))]; } } } } } if ((((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + ((((int)threadIdx.y) * 11) / 3)) < 575) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1725) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 69) { if ((((((int)blockIdx.z) * 24) + ((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24)) + 
((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 3))] = kernel[((((((((((int)blockIdx.z) * 41472) + (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + (((((((int)threadIdx.x) * 2) + ((((int)threadIdx.y) * 11) / 3)) + 1) % 24) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.y) * 11) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 4) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1724) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 68) { if ((((((int)blockIdx.z) * 24) + (((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24)) + ((int)threadIdx.z)) < 96) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 4))] = kernel[((((((((((int)blockIdx.z) * 41472) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) / 24) * 1728)) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + ((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 4) / 3)) % 24) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 1) % 3)))]; } } } } } if (((((((int)threadIdx.x) * 2) + (((((int)threadIdx.y) * 11) + 5) / 3)) / 24) + ((int)threadIdx.z)) < 24) { if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.x) * 2)) + (((((int)threadIdx.y) * 11) + 5) / 3)) < 576) { if ((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) < 1723) { if (((((int)threadIdx.y) * 11) + (((int)threadIdx.x) * 6)) < 67) { if (((int)threadIdx.x) < 1) { kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 11)) + (((int)threadIdx.x) * 6)) + 5))] = kernel[((((((((((int)blockIdx.z) * 41472) + (((int)threadIdx.z) * 1728)) + (rc_outer * 216)) + (((int)threadIdx.x) * 18)) + ((((((int)threadIdx.y) * 11) + 5) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.y) * 11) + 2) % 3)))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 24; ++rc_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 42))]; pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 84))]; pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 126))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 1))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 43))]; pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 85))]; pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 127))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 2))]; pad_temp_shared_local[(6)] = 
pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 44))]; pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 86))]; pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 128))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 3))]; pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 45))]; pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 87))]; pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 168) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 2)) + 129))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)))]; kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)) + 1))]; kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 72) + (rc_inner_outer * 3)) + 2))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * 
kernel_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(2)])); } } } compute[((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 196))] = compute_local[(2)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 392))] = compute_local[(4)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 588))] = compute_local[(6)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 197))] = compute_local[(3)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 393))] = compute_local[(5)]; compute[(((((((((int)blockIdx.z) * 18816) + (((int)threadIdx.z) * 784)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + (((int)threadIdx.x) * 2)) + 589))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); 
cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t 
convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; 
++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += 
shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += 
shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); 
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(7,1,4); dim3 block(2,7,24); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_cudnn_host, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
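The benchmark main() above times cuDNN (FFT/Winograd/GEMM), the TVM-generated kernel, and the hand-written conv2d with CUDA events. A minimal sketch of that event-timing pattern, factored into a reusable helper; the helper name and the lambda-based launch argument are illustrative additions, not part of the original file.

#include <cuda_runtime.h>

// Hypothetical helper mirroring the cudaEventRecord / cudaEventSynchronize /
// cudaEventElapsedTime sequence used repeatedly in main() above.
template <typename Launch>
float time_kernel_ms(Launch launch) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    launch();                    // enqueue the kernel(s) being measured
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);  // block until the stop event has completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}

// Usage sketch: float t = time_kernel_ms([&]{ conv2d<<<TCS*THS, N*TWS, smem>>>(in, k, out); });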
94282aa2839972c27f8195e6652352ef64cab144.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.6.2) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date May 2015

       @generated from ziterilu.cu normal z -> c, Sun May 3 11:22:58 2015

*/
#include "common_magmasparse.h"

#define PRECISION_c


__global__ void
magma_citerilu_csr_kernel(
    magma_int_t num_rows,
    magma_int_t nnz,
    magma_index_t *rowidxA,
    magma_index_t *colidxA,
    const magmaFloatComplex * __restrict__ A,
    magma_index_t *rowptrL,
    magma_index_t *colidxL,
    magmaFloatComplex *valL,
    magma_index_t *rowptrU,
    magma_index_t *rowidxU,
    magmaFloatComplex *valU )
{
    int i, j;
    int k = blockDim.x * blockIdx.x + threadIdx.x;

    magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
    magmaFloatComplex s, sp;
    int il, iu, jl, ju;

    if (k < nnz) {
        i = rowidxA[k];
        j = colidxA[k];

#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        s = __ldg( A+k );
#else
        s = A[k];
#endif

        il = rowptrL[i];
        iu = rowptrU[j];

        while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
            sp = zero;
            jl = colidxL[il];
            ju = rowidxU[iu];

            // avoid branching
            sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
            s  = ( jl == ju ) ? s-sp : s;
            il = ( jl <= ju ) ? il+1 : il;
            iu = ( jl >= ju ) ? iu+1 : iu;
        }

        // undo the last operation (it must be the last)
        s += sp;
        __syncthreads();

        if ( i>j )      // modify l entry
            valL[il-1] = s / valU[rowptrU[j+1]-1];
        else {          // modify u entry
            valU[iu-1] = s;
        }
    }
}// kernel


/**
    Purpose
    -------

    This routine iteratively computes an incomplete LU factorization.
    The idea is according to Edmond Chow's presentation at SIAM 2014.
    This routine was used in the ISC 2015 paper:
    E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
                     for Computing Incomplete Factorizations on GPUs'

    The input format of the matrix is Magma_CSRCOO for the upper and lower
    triangular parts. Note however, that we flip col and rowidx for the U-part.
    Every component of L and U is handled by one thread.

    Arguments
    ---------

    @param[in]
    A           magma_c_matrix
                input matrix A determining initial guess & processing order

    @param[in][out]
    L           magma_c_matrix
                input/output matrix L containing the ILU approximation

    @param[in][out]
    U           magma_c_matrix
                input/output matrix U containing the ILU approximation

    @param[in]
    A_CSR       magma_c_matrix
                input/output matrix containing the IC approximation

    @ingroup magmasparse_cgegpuk
    ********************************************************************/

extern "C" magma_int_t
magma_citerilu_csr(
    magma_c_matrix A,
    magma_c_matrix L,
    magma_c_matrix U,
    magma_queue_t queue )
{
    int blocksize1 = 128;
    int blocksize2 = 1;

    int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1;
    int dimgrid2 = 1;
    int dimgrid3 = 1;

    // Runtime API
    // hipFuncCachePreferShared: shared memory is 48 KB
    // hipFuncCachePreferEqual: shared memory is 32 KB
    // hipFuncCachePreferL1: shared memory is 16 KB
    // hipFuncCachePreferNone: no preference
    //hipFuncSetCacheConfig(hipFuncCachePreferShared);
    hipDeviceSetCacheConfig( hipFuncCachePreferL1 );

    dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
    dim3 block( blocksize1, blocksize2, 1 );

    hipLaunchKernelGGL(( magma_citerilu_csr_kernel), dim3(grid), dim3(block), 0, magma_stream ,
        A.num_rows, A.nnz, A.rowidx, A.col, A.val,
        L.row, L.col, L.val,
        U.row, U.col, U.val );

    return MAGMA_SUCCESS;
}
94282aa2839972c27f8195e6652352ef64cab144.cu
/*
    -- MAGMA (version 1.6.2) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date May 2015

       @generated from ziterilu.cu normal z -> c, Sun May 3 11:22:58 2015

*/
#include "common_magmasparse.h"

#define PRECISION_c


__global__ void
magma_citerilu_csr_kernel(
    magma_int_t num_rows,
    magma_int_t nnz,
    magma_index_t *rowidxA,
    magma_index_t *colidxA,
    const magmaFloatComplex * __restrict__ A,
    magma_index_t *rowptrL,
    magma_index_t *colidxL,
    magmaFloatComplex *valL,
    magma_index_t *rowptrU,
    magma_index_t *rowidxU,
    magmaFloatComplex *valU )
{
    int i, j;
    int k = blockDim.x * blockIdx.x + threadIdx.x;

    magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
    magmaFloatComplex s, sp;
    int il, iu, jl, ju;

    if (k < nnz) {
        i = rowidxA[k];
        j = colidxA[k];

#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        s = __ldg( A+k );
#else
        s = A[k];
#endif

        il = rowptrL[i];
        iu = rowptrU[j];

        while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
            sp = zero;
            jl = colidxL[il];
            ju = rowidxU[iu];

            // avoid branching
            sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
            s  = ( jl == ju ) ? s-sp : s;
            il = ( jl <= ju ) ? il+1 : il;
            iu = ( jl >= ju ) ? iu+1 : iu;
        }

        // undo the last operation (it must be the last)
        s += sp;
        __syncthreads();

        if ( i>j )      // modify l entry
            valL[il-1] = s / valU[rowptrU[j+1]-1];
        else {          // modify u entry
            valU[iu-1] = s;
        }
    }
}// kernel


/**
    Purpose
    -------

    This routine iteratively computes an incomplete LU factorization.
    The idea is according to Edmond Chow's presentation at SIAM 2014.
    This routine was used in the ISC 2015 paper:
    E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
                     for Computing Incomplete Factorizations on GPUs'

    The input format of the matrix is Magma_CSRCOO for the upper and lower
    triangular parts. Note however, that we flip col and rowidx for the U-part.
    Every component of L and U is handled by one thread.

    Arguments
    ---------

    @param[in]
    A           magma_c_matrix
                input matrix A determining initial guess & processing order

    @param[in][out]
    L           magma_c_matrix
                input/output matrix L containing the ILU approximation

    @param[in][out]
    U           magma_c_matrix
                input/output matrix U containing the ILU approximation

    @param[in]
    A_CSR       magma_c_matrix
                input/output matrix containing the IC approximation

    @ingroup magmasparse_cgegpuk
    ********************************************************************/

extern "C" magma_int_t
magma_citerilu_csr(
    magma_c_matrix A,
    magma_c_matrix L,
    magma_c_matrix U,
    magma_queue_t queue )
{
    int blocksize1 = 128;
    int blocksize2 = 1;

    int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1;
    int dimgrid2 = 1;
    int dimgrid3 = 1;

    // Runtime API
    // cudaFuncCachePreferShared: shared memory is 48 KB
    // cudaFuncCachePreferEqual: shared memory is 32 KB
    // cudaFuncCachePreferL1: shared memory is 16 KB
    // cudaFuncCachePreferNone: no preference
    //cudaFuncSetCacheConfig(cudaFuncCachePreferShared);
    cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );

    dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
    dim3 block( blocksize1, blocksize2, 1 );

    magma_citerilu_csr_kernel<<< grid, block, 0, magma_stream >>>
        ( A.num_rows, A.nnz, A.rowidx, A.col, A.val,
          L.row, L.col, L.val,
          U.row, U.col, U.val );

    return MAGMA_SUCCESS;
}
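The documentation above describes Chow's asynchronous iterative ILU: each call to magma_citerilu_csr performs one fixed-point sweep that updates every stored entry of L and U in parallel. A minimal host-side sketch of driving several sweeps, assuming A, L and U have already been prepared in the CSRCOO/CSR layouts the routine expects; the wrapper name and the fixed sweep count are assumptions, not part of MAGMA.

// Hypothetical driver: in the iterative scheme, accuracy improves with the
// number of sweeps, so the one-sweep routine is simply called repeatedly.
extern "C" magma_int_t
run_citerilu_sweeps(
    magma_c_matrix A,
    magma_c_matrix L,
    magma_c_matrix U,
    int num_sweeps,
    magma_queue_t queue )
{
    for (int s = 0; s < num_sweeps; ++s) {
        magma_citerilu_csr( A, L, U, queue );   // one asynchronous update of all L/U entries
    }
    return MAGMA_SUCCESS;
}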
a1455cf2412fe7a44a26156adcd4d2c08b45996b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {

#ifndef REAL
#define REAL float
#endif

#ifndef CAST
#define CAST(fun) fun ## f
#endif

#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif

#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif

}


__global__ void ge_fmin (const int sd, const int fd,
                         const REAL* a, const int offset_a, const int ld_a,
                         const REAL* b, const int offset_b, const int ld_b,
                         REAL* c, const int offset_c, const int ld_c) {

    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < fd);

    if (valid) {
        c[offset_c + gid_0 + gid_1 * ld_c] =
            CAST(fmin)(a[offset_a + gid_0 + gid_1 * ld_a],
                       b[offset_b + gid_0 + gid_1 * ld_b]);
    }
}
a1455cf2412fe7a44a26156adcd4d2c08b45996b.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void ge_fmin (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < fd); if (valid) { c[offset_c + gid_0 + gid_1 * ld_c] = CAST(fmin)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]); } }
b04cbacf107eefc566051e25b0cd54e9e9983c7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <stdio.h> #include <algorithm> #include <stdlib.h> #include <time.h> //#include <mpir.h> using namespace std; struct Point { int x = 0; int y = 0; }; __device__ int xgcd(int a, int b, int *x, int *y) { int prevx = 1, x1 = 0, prevy = 0, y1 = 1; while (b) { int q = a/b; prevx = x1; x1 = prevx - q*x1; prevy = y1; y1 = prevy - q*y1; b = a % b; a = b; } *x = prevx; *y = prevy; return a; } __global__ void modinv(int *x, int *y, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { int a, b; int g = xgcd(x[i], y[i], &a, &b); if (g != 1) ; else ((a%y[i] + y[i]) % y[i]); } } /* Point elliptic_add(Point P, Point Q, int a, int b, int n) { if (P.x == 0 && P.y == 0) return Q; else if(Q.x == 0 && Q.y == 0) return P; int x1 = P.x; int y1 = P.y; int x2 = Q.x; int y2 = Q.y; int num = 0, den = 0; if(P.x == Q.x && P.y == Q.y) { if (2*y1%n == 0) { Point temp; return temp; } else { num = (3*(pow(x1,2) + a) % n; den = (2*y1) % n; } } else { if((x2-x1)%n == 0) { Point temp; return temp; } else { num = (y2- y1) % n den = (x2- x1) % n } } if(__gcd(den, n) != 1) { Point temp; temp.x = -1; temp.y = -1; return temp; } else den = modinv(den,n); int slope = (num * den) % n; int xR = (pow(slope,2)%n) -x1 -x2) %n; int yR = (slope*(x1- ((pow(slope,2)%n) - x1 - x2)) - y1) % n Point temp2; temp2.x = xR; temp2.y = yR; return temp2; } Point elliptic_mul(int d, Point P, int a, int n) { int b=0; if(P.x != 0 && P.y != 0) b = (pow(P.y,2) - pow(P.x,3) - a*P.x) % n; Point tempR; tempR.x = P.x; tempR.y = P.y; while(d > 1) { Point temp = elliptic_add(P,tempR,a,b,n); tempR.x = temp.x; tempR.y = temp.y; d-= 1; } return tempR; } int lenstra(Point P, int a, int n) { int i = 0; while(i < n) { Point tempR; tempR = elliptic_mul(int(tgamma(i+1)), P, a, n); i += 1; if(tempR.x == -1 && tempR.y == -1) return n; if(__gcd((2*tempR.y)%n,n) != 1) return (__gcd(2*tempR.y,n)%n); } return n; } Point rand_elliptic(int n, int &a) { bool state = 0; while(state != 1) { Point P0; srand((unsigned)time(NULL)); P0.x = rand()%n + 1; P0.y = rand()%n + 1; a = rand()%n + 1; int b = (pow(P0.y,2) - pow(P0.x,3) - a*P0.x) % n; state = ((pow(P0.y,2))%n == (pow(P0.x,3))%n + ((a*P0.x)%n) + b); } return P0; } int lenstra_random(int n) { int g = n; int a = 0; while(g == n) { Point P0 = rand_elliptic(n,a); g = lenstra(P0,a,n); int b = (pow(P0.y,2) - pow(P0.x,3) - a*P0.x) % n; } return g; } */ int main(void) { int N = 1<<27; int *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(int)); hipMallocManaged(&y, N*sizeof(int)); srand((unsigned)time(NULL)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = rand()%10+1; y[i] = 11; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( modinv), dim3(numBlocks), dim3(blockSize), 0, 0, x, y, N); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Free memory hipFree(x); hipFree(y); return 0; }
b04cbacf107eefc566051e25b0cd54e9e9983c7a.cu
#include <iostream> #include <math.h> #include <stdio.h> #include <algorithm> #include <stdlib.h> #include <time.h> //#include <mpir.h> using namespace std; struct Point { int x = 0; int y = 0; }; __device__ int xgcd(int a, int b, int *x, int *y) { int prevx = 1, x1 = 0, prevy = 0, y1 = 1; while (b) { int q = a/b; prevx = x1; x1 = prevx - q*x1; prevy = y1; y1 = prevy - q*y1; b = a % b; a = b; } *x = prevx; *y = prevy; return a; } __global__ void modinv(int *x, int *y, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { int a, b; int g = xgcd(x[i], y[i], &a, &b); if (g != 1) ; else ((a%y[i] + y[i]) % y[i]); } } /* Point elliptic_add(Point P, Point Q, int a, int b, int n) { if (P.x == 0 && P.y == 0) return Q; else if(Q.x == 0 && Q.y == 0) return P; int x1 = P.x; int y1 = P.y; int x2 = Q.x; int y2 = Q.y; int num = 0, den = 0; if(P.x == Q.x && P.y == Q.y) { if (2*y1%n == 0) { Point temp; return temp; } else { num = (3*(pow(x1,2) + a) % n; den = (2*y1) % n; } } else { if((x2-x1)%n == 0) { Point temp; return temp; } else { num = (y2- y1) % n den = (x2- x1) % n } } if(__gcd(den, n) != 1) { Point temp; temp.x = -1; temp.y = -1; return temp; } else den = modinv(den,n); int slope = (num * den) % n; int xR = (pow(slope,2)%n) -x1 -x2) %n; int yR = (slope*(x1- ((pow(slope,2)%n) - x1 - x2)) - y1) % n Point temp2; temp2.x = xR; temp2.y = yR; return temp2; } Point elliptic_mul(int d, Point P, int a, int n) { int b=0; if(P.x != 0 && P.y != 0) b = (pow(P.y,2) - pow(P.x,3) - a*P.x) % n; Point tempR; tempR.x = P.x; tempR.y = P.y; while(d > 1) { Point temp = elliptic_add(P,tempR,a,b,n); tempR.x = temp.x; tempR.y = temp.y; d-= 1; } return tempR; } int lenstra(Point P, int a, int n) { int i = 0; while(i < n) { Point tempR; tempR = elliptic_mul(int(tgamma(i+1)), P, a, n); i += 1; if(tempR.x == -1 && tempR.y == -1) return n; if(__gcd((2*tempR.y)%n,n) != 1) return (__gcd(2*tempR.y,n)%n); } return n; } Point rand_elliptic(int n, int &a) { bool state = 0; while(state != 1) { Point P0; srand((unsigned)time(NULL)); P0.x = rand()%n + 1; P0.y = rand()%n + 1; a = rand()%n + 1; int b = (pow(P0.y,2) - pow(P0.x,3) - a*P0.x) % n; state = ((pow(P0.y,2))%n == (pow(P0.x,3))%n + ((a*P0.x)%n) + b); } return P0; } int lenstra_random(int n) { int g = n; int a = 0; while(g == n) { Point P0 = rand_elliptic(n,a); g = lenstra(P0,a,n); int b = (pow(P0.y,2) - pow(P0.x,3) - a*P0.x) % n; } return g; } */ int main(void) { int N = 1<<27; int *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(int)); cudaMallocManaged(&y, N*sizeof(int)); srand((unsigned)time(NULL)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = rand()%10+1; y[i] = 11; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; modinv<<<numBlocks, blockSize>>>(x, y, N); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Free memory cudaFree(x); cudaFree(y); return 0; }
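The device xgcd above is meant to return gcd(a, b) together with Bezout coefficients x, y (a*x + b*y = g) so that modinv can form the inverse as (x % n + n) % n when g == 1 (the kernel currently discards that result). As written, the loop overwrites prevx/prevy (and a) before their old values are used, so each rotation needs a temporary; a conventional host-side reference for comparison, with function names of my own choosing:

// Reference extended Euclid: returns g = gcd(a, b) and x, y with a*x + b*y = g.
int xgcd_ref(int a, int b, int *x, int *y) {
    int prevx = 1, curx = 0, prevy = 0, cury = 1;
    while (b != 0) {
        int q = a / b;
        int r = a % b;
        int tx = prevx - q * curx;  prevx = curx;  curx = tx;   // rotate (prevx, curx)
        int ty = prevy - q * cury;  prevy = cury;  cury = ty;   // rotate (prevy, cury)
        a = b;
        b = r;
    }
    *x = prevx;
    *y = prevy;
    return a;
}

// Modular inverse of v modulo n, defined only when gcd(v, n) == 1.
int modinv_ref(int v, int n) {
    int x, y;
    int g = xgcd_ref(v, n, &x, &y);
    return (g == 1) ? ((x % n) + n) % n : -1;   // -1 signals "no inverse exists"
}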
66bc505b4b828abe925542ae0d03cc8468def19d.hip
// !!! This is a file automatically generated by hipify!!! #include "LF_Utils.cuh" uint8_t* alloc_uint8(size_t size, std::string alloc_type) { hipError_t err; uint8_t* buf; if (alloc_type == "pinned") { err = hipHostMalloc((void**)&buf, size); assert(err == hipSuccess); memset(buf, 0, size); } else if (alloc_type == "pageable") { buf = new uint8_t[size](); memset(buf, 0, size); } else if (alloc_type == "device") { err = hipMalloc((void**)&buf, size); err = hipMemset(buf, 0, size); assert(err == hipSuccess); } else if (alloc_type == "unified") { err = hipMallocManaged((void**)&buf, size); assert(err == hipSuccess); } else exit(1); return buf; } void free_uint8(uint8_t* buf, std::string alloc_type) { hipError_t err; if (alloc_type == "pinned") { err = hipHostFree(buf); assert(err == hipSuccess); } else if (alloc_type == "pageable") { delete[] buf; } else if (alloc_type == "device" || alloc_type == "unified") { err = hipFree(buf); assert(err == hipSuccess); } else exit(1); } int read_uint8(uint8_t* buf, std::string filename, size_t size) { int fd; int ret; fd = open(filename.c_str(), O_RDONLY | O_BINARY); ret = fd; if (ret < 0) { printf("open failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } if (size < 0) { if ((ret = lseek(fd, 0, SEEK_END)) < 0) { printf("SEEK_END failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } if ((ret = tell(fd)) < 0) { printf("tell failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } size = ret; if ((ret = lseek(fd, 0, SEEK_SET)) < 0) { printf("SEEK_SET failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } } ret = read(fd, buf, sizeof(uint8_t) * size); // x64 close(fd); if (ret != size) { printf("read failed, %s\n", filename.c_str()); assert(ret == size); exit(1); } return ret; } int write_uint8(uint8_t* buf, std::string filename, size_t size) { int fd; if ((fd = open(filename.c_str(), O_WRONLY | O_BINARY)) < 0) return fd; if (size < 0) size = _msize(buf); int ret = write(fd, buf, sizeof(uint8_t) * size); // x64 close(fd); return ret; } void StopWatch::Start() { t0 = std::chrono::high_resolution_clock::now(); } double StopWatch::Stop() { double stop = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now().time_since_epoch() - t0.time_since_epoch()).count(); return stop / 1000000.0; // ms } double getEuclideanDist(int x, int y, int origX, int origY) { return sqrt(pow(((double)x - (double)origX), 2) + pow(((double)y - (double)origY), 2)); } int clamp(int val, int min, int max) { if (val > max) return max; else if (val < min) return min; else return val; } double rad2deg(double rad) { return (rad * 180.0 / PI); } double deg2rad(double deg) { return (deg * PI / 180.0); } float deg2rad(float deg) { return (deg * PI / 180.0f); } void minmax(int val, int& min, int& max) { min = (val < min) ? val : min; max = (val > max) ? 
val : max; } int getKey(int& posX, int& posY) { // printf("%d, %d -> ", posX, posY); int c = getch(); switch (c) { case 'x': { posY--; } break; case 'c': { posX++; posY--; } break; case 'd': { posX++; } break; case 'e': { posX++; posY++; } break; case 'w': { posY++; } break; case 'q': { posX--; posY++; } break; case 'a': { posX--; } break; case 'z': { posX--; posY--; } break; case 27: { printf("Terminate\n"); return -1; } default: break; } posX = clamp(posX, 201, 299); posY = clamp(posY, 201, 299); return 0; } std::string IntToFormattedString(int n) { if (n < 10) return ("000" + std::to_string(n)); else if (n < 100) return ("00" + std::to_string(n)); else if (n < 1000) return ("0" + std::to_string(n)); else return (std::to_string(n)); } std::string FloatToFormattedString(float f) { if (f < 10) return ("000" + std::to_string(f)); else if (f < 100) return ("00" + std::to_string(f)); else if (f < 1000) return ("0" + std::to_string(f)); else return (std::to_string(f)); } int preRendering(int x, int z) { int width = 4096; std::pair<int, int> range[4][50]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 50; j++) { range[i][j].first = 1e6; range[i][j].second = -1e6; } } int DATAW = 50; int LFUW = 100; int Y = LFUW / 2; for (int dir = 0; dir < 4; dir++) { int localPosX_front = ((x % 100) - 50); int localPosZ_front = ((z % 100) - 50); int localPosX = localPosX_front; int localPosZ = localPosZ_front; if (dir == 1) { localPosX = -1 * localPosZ_front; localPosZ = localPosX_front; } else if (dir == 2) { localPosX = -1 * localPosX_front; localPosZ = -1 * localPosZ_front; } else if (dir == 3) { localPosX = localPosZ_front; localPosZ = -1 * localPosX_front; } float theta_L = rad2deg(atan2f((-1.0f * LFUW / 2 - localPosX), (LFUW / 2 - localPosZ))); float theta_R = rad2deg(atan2f((1.0f * LFUW / 2 - localPosX), (LFUW / 2 - localPosZ))); int output_width = (int)((theta_R - theta_L) / 0.04f); for (int w = 0; w < output_width; w++) { float theta_P = theta_L + (0.04f * (float)w); // ray front (rad) // float xP = x0 + z0 * tanf(deg2rad(theta_P)); // tan -> odd function float xP = (float)(Y - localPosZ) * tanf(deg2rad(theta_P)) + localPosX; // tan -> odd function float b = sqrtf(2.0f) * LFUW; float N_dist = sqrt((float)((xP - localPosX) * (xP - localPosX) + (Y - localPosZ) * (Y - localPosZ))) / b; xP /= 2; int P_1 = (int)roundf(xP + (DATAW / 2)); if (dir == 1 || dir == 2) P_1 = DATAW - P_1 - 1; P_1 = clamp(P_1, 0, DATAW - 1); float U = (theta_P * ((1.0f) / (180.0f))) * width / 2 + width / 2; int U_1 = (int)(roundf(U)); if (dir == 1) U_1 += width / 4; if (dir == 2) U_1 += width / 2; if (dir == 3) U_1 -= width / 4; if (U_1 >= width) U_1 = U_1 - width; else if (U_1 < 0) U_1 = U_1 + width; U_1 = clamp(U_1, 0, width - 1); range[dir][P_1].first = U_1 < range[dir][P_1].first ? U_1 : range[dir][P_1].first; range[dir][P_1].second = U_1 > range[dir][P_1].second ? 
U_1 : range[dir][P_1].second; } } std::string filename = "S:/PixelRange/" + std::to_string(x) + "_" + std::to_string(z) + ".txt"; FILE* fp = fopen(filename.c_str(), "w"); for (int i = 0; i < 4; i++) { for (int j = 0; j < 50; j++) { if (range[i][j].first != 1e6 || range[i][j].second != -1e6) fprintf(fp, "%d\t%d\t%d\t%d\n", i, j, range[i][j].first, range[i][j].second); } } fclose(fp); return 0; } void write_rendering_range() { for (int y = 0; y < 100; y++) { for (int x = 0; x < 100; x++) { preRendering(x, y); } } } int getLFUID(const int& posX, const int& posY) { return 56 * (posX / 100) + (posY / 100); } void find_LF_number_BMW(int& front, int& right, int& back, int& left, const int& LFUID) { // 56x6 LFU left = LFUID; right = left + 56; back = (LFUID / 56) + (6 * (LFUID % 56)) + 392; front = back + 6; } void getLocalPosition(int& localPosX, int& localPosY, const int& curPosX, const int& curPosY) { localPosX = curPosX % 100 - 50; localPosY = curPosY % 100 - 50; } void write_bmw_fname_array(std::string path) { FILE* fp = fopen(path.c_str(), "w"); fprintf(fp, "std::string BMW_LF[336][4] = \n"); fprintf(fp, "{\n"); for (int i = 0; i < 336; i++) { fprintf(fp, "\t{\n"); int f, r, b, l; find_LF_number_BMW(f, r, b, l, i); fprintf(fp, "\t\t\"Row%d\",\n", f); fprintf(fp, "\t\t\"Column%d\",\n", r); fprintf(fp, "\t\t\"Row%d\",\n", b); fprintf(fp, "\t\t\"Column%d\"\n", l); if (i == 336) fprintf(fp, "\t}\n"); else fprintf(fp, "\t},\n"); } fprintf(fp, "};"); fclose(fp); } size_t get_devmem_freespace() { size_t free, total; hipMemGetInfo(&free, &total); return free; } size_t get_devmem_totalpace() { size_t free, total; hipMemGetInfo(&free, &total); return total; } void query_CudaMemory() { size_t free, total; free = get_devmem_freespace(); total = get_devmem_totalpace(); hipMemGetInfo(&free, &total); printf("%d KB free of total %d KB\n", free / 1024, total / 1024); } void constructLF_interlace() { // BMW LF configuration.xlsx /* unsigned char* LF_odd = new unsigned char[HEIGTH*WIDTH / 2 * 3]; unsigned char* LF_even = new unsigned char[HEIGTH*WIDTH / 2 * 3]; char OUT_FILE_odd[128]; char OUT_FILE_even[128]; char LF_FILE[128]; //char LF_FILE[100] = "LFU360/191020/Asite/Row10/0001.jpg"; int N = 50; FILE* fp_odd; FILE* fp_even; for (int col = 1; col <= 392; col++) { //sprintf_s(OUT_FILE, sizeof(OUT_FILE), "LFU360/191025_output/Row%d.LF", count); sprintf_s(OUT_FILE_odd, sizeof(OUT_FILE_odd), "E:/BMW_4K/Column%d_odd.bgr", col - 1); sprintf_s(OUT_FILE_even, sizeof(OUT_FILE_even), "E:/BMW_4K/Column%d_even.bgr", col - 1); fopen_s(&fp_odd, OUT_FILE_odd, "wb"); fopen_s(&fp_even, OUT_FILE_even, "wb"); for (int no = 1; no <= N; no++) { if (no < 10) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/000%d.jpg", col, no); } else if (no < 100) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/00%d.jpg", col, no); } else { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/0%d.jpg", col, no); } cv::Mat src_img = cv::imread(LF_FILE); cv::Mat img; resize(src_img, img, Size(TARGET_W, TARGET_H)); for (int w = 0; w < TARGET_W; w++) { for (int h = 0; h < TARGET_H / 2; h++) { LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h, w)[0]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h, w)[1]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h, w)[2]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h + 1, w)[0]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h + 1, w)[1]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h + 1, w)[2]; } } 
fwrite(LF_odd, 1, TARGET_W* TARGET_H / 2 * 3, fp_odd); fwrite(LF_even, 1, TARGET_W* TARGET_H / 2 * 3, fp_even); } printf("Read col %d IMAGE..\n", col); fclose(fp_odd); fclose(fp_even); } for (int row = 1; row <= 342; row++) { int newRowNum = 336 - (6 * ((row - 1) / 6)) + ((row - 1) % 6) + 392; //sprintf_s(OUT_FILE, sizeof(OUT_FILE), "LFU360/191025_output/Row%d.LF", count); sprintf_s(OUT_FILE_odd, sizeof(OUT_FILE_odd), "E:/BMW_4K/Row%d_odd.bgr", newRowNum); sprintf_s(OUT_FILE_even, sizeof(OUT_FILE_even), "E:/BMW_4K/Row%d_even.bgr", newRowNum); fopen_s(&fp_odd, OUT_FILE_odd, "wb"); fopen_s(&fp_even, OUT_FILE_even, "wb"); for (int no = 1; no <= N; no++) { if (no < 10) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/000%d.jpg", row, no); } else if (no < 100) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/00%d.jpg", row, no); } else { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/0%d.jpg", row, no); } cv::Mat src_img = cv::imread(LF_FILE); cv::Mat img; resize(src_img, img, Size(TARGET_W, TARGET_H)); for (int w = 0; w < TARGET_W; w++) { for (int h = 0; h < TARGET_H / 2; h++) { LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h, w)[0]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h, w)[1]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h, w)[2]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h + 1, w)[0]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h + 1, w)[1]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h + 1, w)[2]; } } fwrite(LF_odd, 1, TARGET_W* TARGET_H / 2 * 3, fp_odd); fwrite(LF_even, 1, TARGET_W* TARGET_H / 2 * 3, fp_even); } printf("Read row %d IMAGE..\n", newRowNum); fclose(fp_odd); fclose(fp_even); } */ } int mround(int n, int m) { return ((int)(n + ((double)m / 2.0))) / m * m; } double differentiation(double prev, double cur, double timespan) { return (cur - prev) / timespan; } __device__ int dev_SignBitMasking(int l, int r) { return !!((l - r) & 0x80000000); // if l < r : return 1 } __device__ int dev_Clamp(int val, int min, int max) { return !dev_SignBitMasking(val, max) * max + dev_SignBitMasking(val, max) * (dev_SignBitMasking(val, min) * min + !dev_SignBitMasking(val, min) * val); } __device__ float dev_rad2deg(float rad) { return (rad * 180.0f / 3.14159274f); } __device__ float dev_deg2rad(float deg) { return (deg * 3.14159274f / 180.0f); } __device__ int dev_getLFUID(const int& posX, const int& posY) { return 56 * (posX / 100) + (posY / 100); } __device__ int dev_find_LF_number_BMW(const int& direction, const int& posX, const int& posY) { int LFUID = dev_getLFUID(posX, posY); switch (direction) { case 0: return (LFUID / 56) + (6 * (LFUID % 56)) + 392 + 6; break; case 1: return LFUID + 56; break; case 2: return (LFUID / 56) + (6 * (LFUID % 56)) + 392; break; case 3: return LFUID; break; default: return -1; break; } }
66bc505b4b828abe925542ae0d03cc8468def19d.cu
#include "LF_Utils.cuh" uint8_t* alloc_uint8(size_t size, std::string alloc_type) { cudaError_t err; uint8_t* buf; if (alloc_type == "pinned") { err = cudaMallocHost((void**)&buf, size); assert(err == cudaSuccess); memset(buf, 0, size); } else if (alloc_type == "pageable") { buf = new uint8_t[size](); memset(buf, 0, size); } else if (alloc_type == "device") { err = cudaMalloc((void**)&buf, size); err = cudaMemset(buf, 0, size); assert(err == cudaSuccess); } else if (alloc_type == "unified") { err = cudaMallocManaged((void**)&buf, size); assert(err == cudaSuccess); } else exit(1); return buf; } void free_uint8(uint8_t* buf, std::string alloc_type) { cudaError_t err; if (alloc_type == "pinned") { err = cudaFreeHost(buf); assert(err == cudaSuccess); } else if (alloc_type == "pageable") { delete[] buf; } else if (alloc_type == "device" || alloc_type == "unified") { err = cudaFree(buf); assert(err == cudaSuccess); } else exit(1); } int read_uint8(uint8_t* buf, std::string filename, size_t size) { int fd; int ret; fd = open(filename.c_str(), O_RDONLY | O_BINARY); ret = fd; if (ret < 0) { printf("open failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } if (size < 0) { if ((ret = lseek(fd, 0, SEEK_END)) < 0) { printf("SEEK_END failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } if ((ret = tell(fd)) < 0) { printf("tell failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } size = ret; if ((ret = lseek(fd, 0, SEEK_SET)) < 0) { printf("SEEK_SET failed, %s\n", filename.c_str()); assert(ret == 0); exit(1); } } ret = read(fd, buf, sizeof(uint8_t) * size); // x64 close(fd); if (ret != size) { printf("read failed, %s\n", filename.c_str()); assert(ret == size); exit(1); } return ret; } int write_uint8(uint8_t* buf, std::string filename, size_t size) { int fd; if ((fd = open(filename.c_str(), O_WRONLY | O_BINARY)) < 0) return fd; if (size < 0) size = _msize(buf); int ret = write(fd, buf, sizeof(uint8_t) * size); // x64 close(fd); return ret; } void StopWatch::Start() { t0 = std::chrono::high_resolution_clock::now(); } double StopWatch::Stop() { double stop = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now().time_since_epoch() - t0.time_since_epoch()).count(); return stop / 1000000.0; // ms } double getEuclideanDist(int x, int y, int origX, int origY) { return sqrt(pow(((double)x - (double)origX), 2) + pow(((double)y - (double)origY), 2)); } int clamp(int val, int min, int max) { if (val > max) return max; else if (val < min) return min; else return val; } double rad2deg(double rad) { return (rad * 180.0 / PI); } double deg2rad(double deg) { return (deg * PI / 180.0); } float deg2rad(float deg) { return (deg * PI / 180.0f); } void minmax(int val, int& min, int& max) { min = (val < min) ? val : min; max = (val > max) ? 
val : max; } int getKey(int& posX, int& posY) { // printf("%d, %d -> ", posX, posY); int c = getch(); switch (c) { case 'x': { posY--; } break; case 'c': { posX++; posY--; } break; case 'd': { posX++; } break; case 'e': { posX++; posY++; } break; case 'w': { posY++; } break; case 'q': { posX--; posY++; } break; case 'a': { posX--; } break; case 'z': { posX--; posY--; } break; case 27: { printf("Terminate\n"); return -1; } default: break; } posX = clamp(posX, 201, 299); posY = clamp(posY, 201, 299); return 0; } std::string IntToFormattedString(int n) { if (n < 10) return ("000" + std::to_string(n)); else if (n < 100) return ("00" + std::to_string(n)); else if (n < 1000) return ("0" + std::to_string(n)); else return (std::to_string(n)); } std::string FloatToFormattedString(float f) { if (f < 10) return ("000" + std::to_string(f)); else if (f < 100) return ("00" + std::to_string(f)); else if (f < 1000) return ("0" + std::to_string(f)); else return (std::to_string(f)); } int preRendering(int x, int z) { int width = 4096; std::pair<int, int> range[4][50]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 50; j++) { range[i][j].first = 1e6; range[i][j].second = -1e6; } } int DATAW = 50; int LFUW = 100; int Y = LFUW / 2; for (int dir = 0; dir < 4; dir++) { int localPosX_front = ((x % 100) - 50); int localPosZ_front = ((z % 100) - 50); int localPosX = localPosX_front; int localPosZ = localPosZ_front; if (dir == 1) { localPosX = -1 * localPosZ_front; localPosZ = localPosX_front; } else if (dir == 2) { localPosX = -1 * localPosX_front; localPosZ = -1 * localPosZ_front; } else if (dir == 3) { localPosX = localPosZ_front; localPosZ = -1 * localPosX_front; } float theta_L = rad2deg(atan2f((-1.0f * LFUW / 2 - localPosX), (LFUW / 2 - localPosZ))); float theta_R = rad2deg(atan2f((1.0f * LFUW / 2 - localPosX), (LFUW / 2 - localPosZ))); int output_width = (int)((theta_R - theta_L) / 0.04f); for (int w = 0; w < output_width; w++) { float theta_P = theta_L + (0.04f * (float)w); // 가져올 ray가 front와 이루는 각 (rad) // float xP = x0 + z0 * tanf(deg2rad(theta_P)); // tan -> 구간 내에서 odd function float xP = (float)(Y - localPosZ) * tanf(deg2rad(theta_P)) + localPosX; // tan -> 구간 내에서 odd function float b = sqrtf(2.0f) * LFUW; float N_dist = sqrt((float)((xP - localPosX) * (xP - localPosX) + (Y - localPosZ) * (Y - localPosZ))) / b; xP /= 2; int P_1 = (int)roundf(xP + (DATAW / 2)); if (dir == 1 || dir == 2) P_1 = DATAW - P_1 - 1; P_1 = clamp(P_1, 0, DATAW - 1); float U = (theta_P * ((1.0f) / (180.0f))) * width / 2 + width / 2; int U_1 = (int)(roundf(U)); if (dir == 1) U_1 += width / 4; if (dir == 2) U_1 += width / 2; if (dir == 3) U_1 -= width / 4; if (U_1 >= width) U_1 = U_1 - width; else if (U_1 < 0) U_1 = U_1 + width; U_1 = clamp(U_1, 0, width - 1); range[dir][P_1].first = U_1 < range[dir][P_1].first ? U_1 : range[dir][P_1].first; range[dir][P_1].second = U_1 > range[dir][P_1].second ? 
U_1 : range[dir][P_1].second; } } std::string filename = "S:/PixelRange/" + std::to_string(x) + "_" + std::to_string(z) + ".txt"; FILE* fp = fopen(filename.c_str(), "w"); for (int i = 0; i < 4; i++) { for (int j = 0; j < 50; j++) { if (range[i][j].first != 1e6 || range[i][j].second != -1e6) fprintf(fp, "%d\t%d\t%d\t%d\n", i, j, range[i][j].first, range[i][j].second); } } fclose(fp); return 0; } void write_rendering_range() { for (int y = 0; y < 100; y++) { for (int x = 0; x < 100; x++) { preRendering(x, y); } } } int getLFUID(const int& posX, const int& posY) { return 56 * (posX / 100) + (posY / 100); } void find_LF_number_BMW(int& front, int& right, int& back, int& left, const int& LFUID) { // 56x6 LFU left = LFUID; right = left + 56; back = (LFUID / 56) + (6 * (LFUID % 56)) + 392; front = back + 6; } void getLocalPosition(int& localPosX, int& localPosY, const int& curPosX, const int& curPosY) { localPosX = curPosX % 100 - 50; localPosY = curPosY % 100 - 50; } void write_bmw_fname_array(std::string path) { FILE* fp = fopen(path.c_str(), "w"); fprintf(fp, "std::string BMW_LF[336][4] = \n"); fprintf(fp, "{\n"); for (int i = 0; i < 336; i++) { fprintf(fp, "\t{\n"); int f, r, b, l; find_LF_number_BMW(f, r, b, l, i); fprintf(fp, "\t\t\"Row%d\",\n", f); fprintf(fp, "\t\t\"Column%d\",\n", r); fprintf(fp, "\t\t\"Row%d\",\n", b); fprintf(fp, "\t\t\"Column%d\"\n", l); if (i == 336) fprintf(fp, "\t}\n"); else fprintf(fp, "\t},\n"); } fprintf(fp, "};"); fclose(fp); } size_t get_devmem_freespace() { size_t free, total; cudaMemGetInfo(&free, &total); return free; } size_t get_devmem_totalpace() { size_t free, total; cudaMemGetInfo(&free, &total); return total; } void query_CudaMemory() { size_t free, total; free = get_devmem_freespace(); total = get_devmem_totalpace(); cudaMemGetInfo(&free, &total); printf("%d KB free of total %d KB\n", free / 1024, total / 1024); } void constructLF_interlace() { // BMW LF configuration.xlsx 참고 /* unsigned char* LF_odd = new unsigned char[HEIGTH*WIDTH / 2 * 3]; unsigned char* LF_even = new unsigned char[HEIGTH*WIDTH / 2 * 3]; char OUT_FILE_odd[128]; char OUT_FILE_even[128]; char LF_FILE[128]; //char LF_FILE[100] = "LFU360/191020/Asite/Row10/0001.jpg"; int N = 50; FILE* fp_odd; FILE* fp_even; for (int col = 1; col <= 392; col++) { //sprintf_s(OUT_FILE, sizeof(OUT_FILE), "LFU360/191025_output/Row%d.LF", count); sprintf_s(OUT_FILE_odd, sizeof(OUT_FILE_odd), "E:/BMW_4K/Column%d_odd.bgr", col - 1); sprintf_s(OUT_FILE_even, sizeof(OUT_FILE_even), "E:/BMW_4K/Column%d_even.bgr", col - 1); fopen_s(&fp_odd, OUT_FILE_odd, "wb"); fopen_s(&fp_even, OUT_FILE_even, "wb"); for (int no = 1; no <= N; no++) { if (no < 10) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/000%d.jpg", col, no); } else if (no < 100) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/00%d.jpg", col, no); } else { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Column%d/0%d.jpg", col, no); } cv::Mat src_img = cv::imread(LF_FILE); cv::Mat img; resize(src_img, img, Size(TARGET_W, TARGET_H)); for (int w = 0; w < TARGET_W; w++) { for (int h = 0; h < TARGET_H / 2; h++) { LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h, w)[0]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h, w)[1]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h, w)[2]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h + 1, w)[0]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h + 1, w)[1]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h + 1, w)[2]; } } 
fwrite(LF_odd, 1, TARGET_W* TARGET_H / 2 * 3, fp_odd); fwrite(LF_even, 1, TARGET_W* TARGET_H / 2 * 3, fp_even); } printf("Read col %d IMAGE..\n", col); fclose(fp_odd); fclose(fp_even); } for (int row = 1; row <= 342; row++) { int newRowNum = 336 - (6 * ((row - 1) / 6)) + ((row - 1) % 6) + 392; //sprintf_s(OUT_FILE, sizeof(OUT_FILE), "LFU360/191025_output/Row%d.LF", count); sprintf_s(OUT_FILE_odd, sizeof(OUT_FILE_odd), "E:/BMW_4K/Row%d_odd.bgr", newRowNum); sprintf_s(OUT_FILE_even, sizeof(OUT_FILE_even), "E:/BMW_4K/Row%d_even.bgr", newRowNum); fopen_s(&fp_odd, OUT_FILE_odd, "wb"); fopen_s(&fp_even, OUT_FILE_even, "wb"); for (int no = 1; no <= N; no++) { if (no < 10) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/000%d.jpg", row, no); } else if (no < 100) { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/00%d.jpg", row, no); } else { sprintf_s(LF_FILE, sizeof(LF_FILE), "F:/BMW/Row%d/0%d.jpg", row, no); } cv::Mat src_img = cv::imread(LF_FILE); cv::Mat img; resize(src_img, img, Size(TARGET_W, TARGET_H)); for (int w = 0; w < TARGET_W; w++) { for (int h = 0; h < TARGET_H / 2; h++) { LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h, w)[0]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h, w)[1]; LF_odd[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h, w)[2]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 0] = img.at<Vec3b>(2 * h + 1, w)[0]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 1] = img.at<Vec3b>(2 * h + 1, w)[1]; LF_even[w * TARGET_H / 2 * 3 + h * 3 + 2] = img.at<Vec3b>(2 * h + 1, w)[2]; } } fwrite(LF_odd, 1, TARGET_W* TARGET_H / 2 * 3, fp_odd); fwrite(LF_even, 1, TARGET_W* TARGET_H / 2 * 3, fp_even); } printf("Read row %d IMAGE..\n", newRowNum); fclose(fp_odd); fclose(fp_even); } */ } int mround(int n, int m) { return ((int)(n + ((double)m / 2.0))) / m * m; } double differentiation(double prev, double cur, double timespan) { return (cur - prev) / timespan; } __device__ int dev_SignBitMasking(int l, int r) { return !!((l - r) & 0x80000000); // if l < r : return 1 } __device__ int dev_Clamp(int val, int min, int max) { return !dev_SignBitMasking(val, max) * max + dev_SignBitMasking(val, max) * (dev_SignBitMasking(val, min) * min + !dev_SignBitMasking(val, min) * val); } __device__ float dev_rad2deg(float rad) { return (rad * 180.0f / 3.14159274f); } __device__ float dev_deg2rad(float deg) { return (deg * 3.14159274f / 180.0f); } __device__ int dev_getLFUID(const int& posX, const int& posY) { return 56 * (posX / 100) + (posY / 100); } __device__ int dev_find_LF_number_BMW(const int& direction, const int& posX, const int& posY) { int LFUID = dev_getLFUID(posX, posY); switch (direction) { case 0: return (LFUID / 56) + (6 * (LFUID % 56)) + 392 + 6; break; case 1: return LFUID + 56; break; case 2: return (LFUID / 56) + (6 * (LFUID % 56)) + 392; break; case 3: return LFUID; break; default: return -1; break; } }
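dev_Clamp above builds a branchless clamp out of dev_SignBitMasking, which isolates the sign bit of l - r to obtain a 0/1 "l < r" predicate (valid as long as l - r does not overflow). A small host-side mirror of the same arithmetic with a few checks; the function and test names are mine.

#include <cassert>

// Host-side copies of the device helpers, for illustration only.
static int sign_bit_masking(int l, int r) {
    return !!((l - r) & 0x80000000);   // 1 when l < r, 0 otherwise
}

static int branchless_clamp(int val, int min, int max) {
    return !sign_bit_masking(val, max) * max +
            sign_bit_masking(val, max) * (sign_bit_masking(val, min) * min +
                                          !sign_bit_masking(val, min) * val);
}

static void clamp_self_test() {
    assert(branchless_clamp(-5, 0, 49) == 0);    // below range -> min
    assert(branchless_clamp(25, 0, 49) == 25);   // inside range -> unchanged
    assert(branchless_clamp(80, 0, 49) == 49);   // above range -> max
}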
9b8247884add8e569f5ca239988fcc3aea3c08d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017, Geometric Computation Group of Stanford University // The MIT License (MIT) // Copyright (c) 2017 Charles R. Qi // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include "cuda_util.h" // input: points(b, c, n) idx(b, m) // output: out(b, c, m) __global__ void gather_by_index_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; } } } } void gather_by_index_kernel_launcher(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { hipLaunchKernelGGL(( gather_by_index_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), b, c, n, npoints, points, idx, out); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, m) idx(b, m) // output: grad_points(b, c, n) __global__ void gather_by_index_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; atomicAdd(grad_points + (i * c + l) * n + a, grad_out[(i * c + l) * m + j]); } } } } void gather_by_index_grad_kernel_launcher(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { hipLaunchKernelGGL(( gather_by_index_grad_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), b, c, n, npoints, grad_out, idx, grad_points); CUDA_CHECK_ERRORS(); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? 
i2 : i1; } // Input dataset: (b, n, 3), tmp: (b, n) // Ouput idxs (b, m) template <unsigned int block_size> __global__ void furthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); if (mag <= 1e-3) continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { unsigned int n_threads = opt_n_threads(n); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); switch (n_threads) { case 512: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 32: hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 8: hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; 
case 2: hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); } CUDA_CHECK_ERRORS(); }
9b8247884add8e569f5ca239988fcc3aea3c08d0.cu
// Copyright (c) 2017, Geometric Computation Group of Stanford University // The MIT License (MIT) // Copyright (c) 2017 Charles R. Qi // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #include "cuda_util.h" // input: points(b, c, n) idx(b, m) // output: out(b, c, m) __global__ void gather_by_index_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; } } } } void gather_by_index_kernel_launcher(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { gather_by_index_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0, at::cuda::getCurrentCUDAStream()>>>( b, c, n, npoints, points, idx, out); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, m) idx(b, m) // output: grad_points(b, c, n) __global__ void gather_by_index_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; atomicAdd(grad_points + (i * c + l) * n + a, grad_out[(i * c + l) * m + j]); } } } } void gather_by_index_grad_kernel_launcher(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { gather_by_index_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0, at::cuda::getCurrentCUDAStream()>>>( b, c, n, npoints, grad_out, idx, grad_points); CUDA_CHECK_ERRORS(); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? 
i2 : i1; } // Input dataset: (b, n, 3), tmp: (b, n) // Ouput idxs (b, m) template <unsigned int block_size> __global__ void furthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); if (mag <= 1e-3) continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { unsigned int n_threads = opt_n_threads(n); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); switch (n_threads) { case 512: furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 256: furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 128: furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 64: furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 32: furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 16: furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 8: furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 4: furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 2: furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 1: furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; default: 
furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); } CUDA_CHECK_ERRORS(); }
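// ---------------------------------------------------------------------------
// Editor's note -- a hedged, minimal host-side sketch (not part of the file
// pair above) showing how furthest_point_sampling_kernel_launcher is typically
// driven. Assumptions: the caller runs where at::cuda::getCurrentCUDAStream()
// is valid (the launcher above uses it), and the temp buffer is pre-filled
// with a large sentinel (1e10f here) so the kernel's min(d, temp[k]) keeps the
// first real distance it computes. Function and variable names below are
// illustrative only.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <vector>

// Defined in the translation unit above.
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
                                             const float *dataset, float *temp,
                                             int *idxs);

// Samples m indices per batch from (b, n, 3) points laid out contiguously.
void sample_farthest_points(const std::vector<float> &points_host, int b,
                            int n, int m, std::vector<int> &idxs_host) {
    float *points_dev = nullptr, *temp_dev = nullptr;
    int *idxs_dev = nullptr;
    cudaMalloc((void **)&points_dev, sizeof(float) * b * n * 3);
    cudaMalloc((void **)&temp_dev, sizeof(float) * b * n);
    cudaMalloc((void **)&idxs_dev, sizeof(int) * b * m);

    cudaMemcpy(points_dev, points_host.data(), sizeof(float) * b * n * 3,
               cudaMemcpyHostToDevice);

    // temp holds "closest distance to the already-selected set" per point;
    // start it very large (assumption: 1e10f, as in common PointNet++ ports).
    std::vector<float> temp_init(static_cast<size_t>(b) * n, 1e10f);
    cudaMemcpy(temp_dev, temp_init.data(), sizeof(float) * b * n,
               cudaMemcpyHostToDevice);

    furthest_point_sampling_kernel_launcher(b, n, m, points_dev, temp_dev,
                                            idxs_dev);

    idxs_host.resize(static_cast<size_t>(b) * m);
    cudaMemcpy(idxs_host.data(), idxs_dev, sizeof(int) * b * m,
               cudaMemcpyDeviceToHost);

    cudaFree(points_dev);
    cudaFree(temp_dev);
    cudaFree(idxs_dev);
}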
dbbf0e518ef4dff980359e27c991d3831b4a9bb7.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <hip/hip_runtime.h> #include "open3d/core/CUDAUtils.h" #include "open3d/core/Tensor.h" #include "open3d/core/kernel/CUDALauncher.cuh" #include "open3d/t/pipelines/kernel/ComputeTransformImpl.h" #include "open3d/t/pipelines/kernel/Reduction6x6Impl.cuh" #include "open3d/t/pipelines/kernel/TransformationConverter.h" namespace open3d { namespace t { namespace pipelines { namespace kernel { const int kThread1DUnit = 256; __global__ void ComputePosePointToPlaneCUDAKernel( const float *source_points_ptr, const float *target_points_ptr, const float *target_normals_ptr, const int64_t *correspondences_first, const int64_t *correspondences_second, const int n, float *global_sum) { __shared__ float local_sum0[kThread1DUnit]; __shared__ float local_sum1[kThread1DUnit]; __shared__ float local_sum2[kThread1DUnit]; const int tid = threadIdx.x; local_sum0[tid] = 0; local_sum1[tid] = 0; local_sum2[tid] = 0; const int workload_idx = threadIdx.x + blockIdx.x * blockDim.x; if (workload_idx >= n) return; float J[6] = {0}, reduction[21 + 6 + 2]; float r = 0; bool valid = GetJacobianPointToPlane(workload_idx, source_points_ptr, target_points_ptr, target_normals_ptr, correspondences_first, correspondences_second, J, r); // Dump J, r into JtJ and Jtr int offset = 0; for (int i = 0; i < 6; ++i) { for (int j = 0; j <= i; ++j) { reduction[offset++] = J[i] * J[j]; } } for (int i = 0; i < 6; ++i) { reduction[offset++] = J[i] * r; } reduction[offset++] = r * r; reduction[offset++] = valid; ReduceSum6x6LinearSystem<float, kThread1DUnit>(tid, valid, reduction, local_sum0, local_sum1, local_sum2, global_sum); } void ComputePosePointToPlaneCUDA(const float *source_points_ptr, const float *target_points_ptr, const float *target_normals_ptr, const int64_t *correspondences_first, const int64_t *correspondences_second, const int n, core::Tensor &pose, const core::Dtype &dtype, const core::Device &device) { core::Tensor global_sum = core::Tensor::Zeros({29}, core::Dtype::Float32, device); float *global_sum_ptr = global_sum.GetDataPtr<float>(); const dim3 blocks((n + 
                       kThread1DUnit - 1) / kThread1DUnit);
    const dim3 threads(kThread1DUnit);

    hipLaunchKernelGGL((ComputePosePointToPlaneCUDAKernel), dim3(blocks),
                       dim3(threads), 0, 0, source_points_ptr,
                       target_points_ptr, target_normals_ptr,
                       correspondences_first, correspondences_second, n,
                       global_sum_ptr);
    OPEN3D_CUDA_CHECK(hipDeviceSynchronize());

    // TODO (@rishabh), residual will be used for adding robust kernel support.
    float residual;
    int inlier_count;
    DecodeAndSolve6x6(global_sum, pose, residual, inlier_count);
}

}  // namespace kernel
}  // namespace pipelines
}  // namespace t
}  // namespace open3d
dbbf0e518ef4dff980359e27c991d3831b4a9bb7.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <cuda.h> #include "open3d/core/CUDAUtils.h" #include "open3d/core/Tensor.h" #include "open3d/core/kernel/CUDALauncher.cuh" #include "open3d/t/pipelines/kernel/ComputeTransformImpl.h" #include "open3d/t/pipelines/kernel/Reduction6x6Impl.cuh" #include "open3d/t/pipelines/kernel/TransformationConverter.h" namespace open3d { namespace t { namespace pipelines { namespace kernel { const int kThread1DUnit = 256; __global__ void ComputePosePointToPlaneCUDAKernel( const float *source_points_ptr, const float *target_points_ptr, const float *target_normals_ptr, const int64_t *correspondences_first, const int64_t *correspondences_second, const int n, float *global_sum) { __shared__ float local_sum0[kThread1DUnit]; __shared__ float local_sum1[kThread1DUnit]; __shared__ float local_sum2[kThread1DUnit]; const int tid = threadIdx.x; local_sum0[tid] = 0; local_sum1[tid] = 0; local_sum2[tid] = 0; const int workload_idx = threadIdx.x + blockIdx.x * blockDim.x; if (workload_idx >= n) return; float J[6] = {0}, reduction[21 + 6 + 2]; float r = 0; bool valid = GetJacobianPointToPlane(workload_idx, source_points_ptr, target_points_ptr, target_normals_ptr, correspondences_first, correspondences_second, J, r); // Dump J, r into JtJ and Jtr int offset = 0; for (int i = 0; i < 6; ++i) { for (int j = 0; j <= i; ++j) { reduction[offset++] = J[i] * J[j]; } } for (int i = 0; i < 6; ++i) { reduction[offset++] = J[i] * r; } reduction[offset++] = r * r; reduction[offset++] = valid; ReduceSum6x6LinearSystem<float, kThread1DUnit>(tid, valid, reduction, local_sum0, local_sum1, local_sum2, global_sum); } void ComputePosePointToPlaneCUDA(const float *source_points_ptr, const float *target_points_ptr, const float *target_normals_ptr, const int64_t *correspondences_first, const int64_t *correspondences_second, const int n, core::Tensor &pose, const core::Dtype &dtype, const core::Device &device) { core::Tensor global_sum = core::Tensor::Zeros({29}, core::Dtype::Float32, device); float *global_sum_ptr = global_sum.GetDataPtr<float>(); const dim3 blocks((n + kThread1DUnit - 1) / kThread1DUnit); const dim3 threads(kThread1DUnit); 
    ComputePosePointToPlaneCUDAKernel<<<blocks, threads>>>(
            source_points_ptr, target_points_ptr, target_normals_ptr,
            correspondences_first, correspondences_second, n, global_sum_ptr);
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());

    // TODO (@rishabh), residual will be used for adding robust kernel support.
    float residual;
    int inlier_count;
    DecodeAndSolve6x6(global_sum, pose, residual, inlier_count);
}

}  // namespace kernel
}  // namespace pipelines
}  // namespace t
}  // namespace open3d
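// ---------------------------------------------------------------------------
// Editor's note -- a hedged sketch (not part of the file pair above) spelling
// out the packing used by the kernel's "Dump J, r into JtJ and Jtr" loop:
// global_sum[0..20] is the lower triangle of JtJ (j <= i, i outer),
// global_sum[21..26] is Jtr, global_sum[27] the summed squared residual and
// global_sum[28] the number of valid correspondences. The struct and function
// names here are illustrative only; the real decoding lives in
// DecodeAndSolve6x6, which consumes the same 29-float layout.
// ---------------------------------------------------------------------------
#include <array>

struct PackedLinearSystem6x6 {
    std::array<float, 36> JtJ;  // full symmetric 6x6, row-major
    std::array<float, 6> Jtr;
    float squared_residual;
    float inlier_count;
};

PackedLinearSystem6x6 UnpackGlobalSum29(const float *g) {
    PackedLinearSystem6x6 out{};
    int offset = 0;
    for (int i = 0; i < 6; ++i) {
        for (int j = 0; j <= i; ++j) {
            out.JtJ[i * 6 + j] = g[offset];
            out.JtJ[j * 6 + i] = g[offset];  // mirror the lower triangle
            ++offset;
        }
    }
    for (int i = 0; i < 6; ++i) {
        out.Jtr[i] = g[offset++];  // g[21..26]
    }
    out.squared_residual = g[offset++];  // g[27]
    out.inlier_count = g[offset++];      // g[28]
    return out;
}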
6c020201bae801addc50177681d8f805fe2afc30.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <stdio.h> #include "common_hip.cuh" #include "math.h" //Const to set TILE_SIZE of the device const float TILE_SIZE = 1024; //Kernel method for vector addition __global__ void VectoraddKernel(float* Agpu, float* Bgpu, float* Cgpu, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] + Bgpu[tid]; } } //Kernel method for vector subtraction __global__ void VectorsubtractKernel(float* Agpu, float* Bgpu, float* Cgpu, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] - Bgpu[tid]; } } //Kernel method for vector scaling __global__ void VectorscaleKernel(float* Agpu, float* Cgpu, float scaling, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] * scaling; } } // Method to allocate memory and invoke kernel method for vector addition bool addVectorGPU(float* M, float* N, float* P, int size) { int bytes = size * sizeof(float); float* Agpu, * Bgpu, * Cgpu; //Page lock memory mapping hipHostGetDevicePointer((void**)&Agpu, M, 0); hipHostGetDevicePointer((void**)&Bgpu, N, 0); hipHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectoraddKernel << <dimGrid, dimBlock >> > (Agpu, Bgpu, Cgpu, size); hipDeviceSynchronize(); //Return error if any hipError_t status = hipGetLastError(); if (status != hipSuccess) { printf("Kernel failed: %s", hipGetErrorString(status)); return false; } return true; } // Method to allocate memory and invoke kernel method for vector subtraction bool subtractVectorGPU(float* M, float* N, float* P, int size) { int bytes = size * sizeof(float); float* Agpu, * Bgpu, * Cgpu; //Page lock memory mapping hipHostGetDevicePointer((void**)&Agpu, M, 0); hipHostGetDevicePointer((void**)&Bgpu, N, 0); hipHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectorsubtractKernel << <dimGrid, dimBlock >> > (Agpu, Bgpu, Cgpu, size); hipDeviceSynchronize(); //Return error if any hipError_t status = hipGetLastError(); if (status != hipSuccess) { printf("Kernel failed: %s", hipGetErrorString(status)); return false; } return true; } // Method to allocate memory and invoke kernel method for vector scaling bool scaleVectorGPU(float* M, float* P, float scaling, int size) { int bytes = size * sizeof(float); float* Agpu, * Cgpu; //Page lock memory mapping hipHostGetDevicePointer((void**)&Agpu, M, 0); hipHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectorscaleKernel << <dimGrid, dimBlock >> > (Agpu, Cgpu, scaling, size); hipDeviceSynchronize(); //Return error if any hipError_t status = hipGetLastError(); if (status != hipSuccess) { printf("Kernel failed: %s", hipGetErrorString(status)); return false; } return true; }
6c020201bae801addc50177681d8f805fe2afc30.cu
#include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <stdio.h> #include "common.cuh" #include "math.h" //Const to set TILE_SIZE of the device const float TILE_SIZE = 1024; //Kernel method for vector addition __global__ void VectoraddKernel(float* Agpu, float* Bgpu, float* Cgpu, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] + Bgpu[tid]; } } //Kernel method for vector subtraction __global__ void VectorsubtractKernel(float* Agpu, float* Bgpu, float* Cgpu, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] - Bgpu[tid]; } } //Kernel method for vector scaling __global__ void VectorscaleKernel(float* Agpu, float* Cgpu, float scaling, int size) { //Thread id int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { Cgpu[tid] = Agpu[tid] * scaling; } } // Method to allocate memory and invoke kernel method for vector addition bool addVectorGPU(float* M, float* N, float* P, int size) { int bytes = size * sizeof(float); float* Agpu, * Bgpu, * Cgpu; //Page lock memory mapping cudaHostGetDevicePointer((void**)&Agpu, M, 0); cudaHostGetDevicePointer((void**)&Bgpu, N, 0); cudaHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectoraddKernel << <dimGrid, dimBlock >> > (Agpu, Bgpu, Cgpu, size); cudaThreadSynchronize(); //Return error if any cudaError_t status = cudaGetLastError(); if (status != cudaSuccess) { printf("Kernel failed: %s", cudaGetErrorString(status)); return false; } return true; } // Method to allocate memory and invoke kernel method for vector subtraction bool subtractVectorGPU(float* M, float* N, float* P, int size) { int bytes = size * sizeof(float); float* Agpu, * Bgpu, * Cgpu; //Page lock memory mapping cudaHostGetDevicePointer((void**)&Agpu, M, 0); cudaHostGetDevicePointer((void**)&Bgpu, N, 0); cudaHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectorsubtractKernel << <dimGrid, dimBlock >> > (Agpu, Bgpu, Cgpu, size); cudaThreadSynchronize(); //Return error if any cudaError_t status = cudaGetLastError(); if (status != cudaSuccess) { printf("Kernel failed: %s", cudaGetErrorString(status)); return false; } return true; } // Method to allocate memory and invoke kernel method for vector scaling bool scaleVectorGPU(float* M, float* P, float scaling, int size) { int bytes = size * sizeof(float); float* Agpu, * Cgpu; //Page lock memory mapping cudaHostGetDevicePointer((void**)&Agpu, M, 0); cudaHostGetDevicePointer((void**)&Cgpu, P, 0); //Set the block and grid dimens dim3 dimBlock(TILE_SIZE); dim3 dimGrid((int)ceil((float)size / (float)TILE_SIZE)); // Launch the kernel on a size-by-size block of threads VectorscaleKernel << <dimGrid, dimBlock >> > (Agpu, Cgpu, scaling, size); cudaThreadSynchronize(); //Return error if any cudaError_t status = cudaGetLastError(); if (status != cudaSuccess) { printf("Kernel failed: %s", cudaGetErrorString(status)); return false; } return true; }
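// ---------------------------------------------------------------------------
// Editor's note -- a hedged sketch (not part of the file pair above).
// addVectorGPU/subtractVectorGPU/scaleVectorGPU call cudaHostGetDevicePointer
// on the buffers they receive, which only succeeds for page-locked, mapped
// host memory; plain malloc'd arrays will not work. A minimal caller under
// that assumption (the function name run_mapped_add is illustrative):
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <stdio.h>

bool addVectorGPU(float *M, float *N, float *P, int size);

int run_mapped_add(int size) {
    // On older devices mapped memory also needs
    // cudaSetDeviceFlags(cudaDeviceMapHost) before the context is created.
    float *A = NULL, *B = NULL, *C = NULL;
    cudaHostAlloc((void **)&A, size * sizeof(float), cudaHostAllocMapped);
    cudaHostAlloc((void **)&B, size * sizeof(float), cudaHostAllocMapped);
    cudaHostAlloc((void **)&C, size * sizeof(float), cudaHostAllocMapped);

    for (int i = 0; i < size; ++i) {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }

    bool ok = addVectorGPU(A, B, C, size);
    if (ok) {
        printf("C[0] = %f (expected 3.0)\n", C[0]);
    }

    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    return ok ? 0 : 1;
}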
a64faa2fa65a9c349850c149915e3dd64f4303f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sobelEdgeDetectionSharedMem2(int *input, int *output, int width, int height, int thresh) { int regArr[4][4]; int i = (blockIdx.x * blockDim.x + threadIdx.x) * 2; int j = (blockIdx.y * blockDim.y + threadIdx.y) * 2; if ( i > 0 && j > 0 && i < width - 1 && j < height - 1) { regArr[0][0] = input[width * (j-1) + i - 1]; regArr[0][1] = input[width * (j-1) + i ]; regArr[0][2] = input[width * (j-1) + i + 1]; regArr[0][3] = input[width * (j-1) + i + 2]; regArr[1][0] = input[width * (j) + i - 1]; regArr[1][1] = input[width * (j) + i ]; regArr[1][2] = input[width * (j) + i + 1]; regArr[1][3] = input[width * (j) + i + 2]; regArr[2][0] = input[width * (j+1) + i - 1]; regArr[2][1] = input[width * (j+1) + i ]; regArr[2][2] = input[width * (j+1) + i + 1]; regArr[2][3] = input[width * (j+1) + i + 2]; regArr[3][0] = input[width * (j+2) + i - 1]; regArr[3][1] = input[width * (j+2) + i ]; regArr[3][2] = input[width * (j+2) + i + 1]; regArr[3][3] = input[width * (j+2) + i + 2]; __syncthreads(); int sum1 = 0, sum2 = 0, magnitude; int num = 3; for(int xind = 1; xind < num; xind++) { for(int yind = 1; yind < num; yind++) { sum1 = regArr[xind+1][yind-1] - regArr[xind-1][yind-1] + 2 * regArr[xind+1][yind ] - 2 * regArr[xind-1][yind ] + regArr[xind+1][yind+1] - regArr[xind-1][yind+1]; sum2 = regArr[xind-1][yind-1] + 2 * regArr[xind][yind-1] + regArr[xind+1][yind-1] - regArr[xind-1][yind+1] - 2 * regArr[xind][yind+1] - regArr[xind+1][yind+1]; magnitude = sum1 * sum1 + sum2 * sum2; if(magnitude > thresh) output[(j + yind - 1) * width + (i + xind - 1)] = 255; else output[(j + yind - 1) * width + (i + xind - 1)] = 0; } } } }
a64faa2fa65a9c349850c149915e3dd64f4303f3.cu
#include "includes.h" __global__ void sobelEdgeDetectionSharedMem2(int *input, int *output, int width, int height, int thresh) { int regArr[4][4]; int i = (blockIdx.x * blockDim.x + threadIdx.x) * 2; int j = (blockIdx.y * blockDim.y + threadIdx.y) * 2; if ( i > 0 && j > 0 && i < width - 1 && j < height - 1) { regArr[0][0] = input[width * (j-1) + i - 1]; regArr[0][1] = input[width * (j-1) + i ]; regArr[0][2] = input[width * (j-1) + i + 1]; regArr[0][3] = input[width * (j-1) + i + 2]; regArr[1][0] = input[width * (j) + i - 1]; regArr[1][1] = input[width * (j) + i ]; regArr[1][2] = input[width * (j) + i + 1]; regArr[1][3] = input[width * (j) + i + 2]; regArr[2][0] = input[width * (j+1) + i - 1]; regArr[2][1] = input[width * (j+1) + i ]; regArr[2][2] = input[width * (j+1) + i + 1]; regArr[2][3] = input[width * (j+1) + i + 2]; regArr[3][0] = input[width * (j+2) + i - 1]; regArr[3][1] = input[width * (j+2) + i ]; regArr[3][2] = input[width * (j+2) + i + 1]; regArr[3][3] = input[width * (j+2) + i + 2]; __syncthreads(); int sum1 = 0, sum2 = 0, magnitude; int num = 3; for(int xind = 1; xind < num; xind++) { for(int yind = 1; yind < num; yind++) { sum1 = regArr[xind+1][yind-1] - regArr[xind-1][yind-1] + 2 * regArr[xind+1][yind ] - 2 * regArr[xind-1][yind ] + regArr[xind+1][yind+1] - regArr[xind-1][yind+1]; sum2 = regArr[xind-1][yind-1] + 2 * regArr[xind][yind-1] + regArr[xind+1][yind-1] - regArr[xind-1][yind+1] - 2 * regArr[xind][yind+1] - regArr[xind+1][yind+1]; magnitude = sum1 * sum1 + sum2 * sum2; if(magnitude > thresh) output[(j + yind - 1) * width + (i + xind - 1)] = 255; else output[(j + yind - 1) * width + (i + xind - 1)] = 0; } } } }
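// ---------------------------------------------------------------------------
// Editor's note -- a hedged launch sketch (not part of the file pair above).
// Each thread of the kernel covers a 2x2 output tile (i and j are scaled by
// 2), so the grid only needs to span half of the image in each dimension.
// Despite the name, regArr is a per-thread local array rather than shared
// memory. The 16x16 block shape is an illustrative choice.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void sobelEdgeDetectionSharedMem2(int *input, int *output,
                                             int width, int height,
                                             int thresh);

void launch_sobel(int *input_dev, int *output_dev, int width, int height,
                  int thresh) {
    dim3 block(16, 16);
    dim3 grid((width / 2 + block.x - 1) / block.x,
              (height / 2 + block.y - 1) / block.y);
    sobelEdgeDetectionSharedMem2<<<grid, block>>>(input_dev, output_dev,
                                                  width, height, thresh);
    cudaDeviceSynchronize();
}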
20a737912dbd3a85a18f472009b1fdce4ad25cb7.hip
// !!! This is a file automatically generated by hipify!!! #include "external/book-professional-cuda-c-programming/CodeSamples/common/common.h" #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> static void info() { // http://developer.download.nvidia.com/compute/cuda/2_3/toolkit/docs/online/structcudaDeviceProp.html int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Total Global Memory (MBytes): %zu\n", prop.totalGlobalMem / 1024 / 1024); printf(" Warp Size: %d\n", prop.warpSize); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); } } /* * This example demonstrates a simple vector sum on the GPU and on the host. * sumArraysOnGPU splits the work of the vector sum across CUDA threads on the * GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially * iterates through vector elements on the host. */ void initialData(float *ip, const int size) { int i; for (i = 0; i < size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } return; } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 1D block 1D __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) for (int iy = 0; iy < ny; iy++) { int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } int to_mb(int bytes) { return bytes / 1024 / 1024; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); info(); size_t mem_tot_0 = 0; size_t mem_free_0 = 0; // size_t mem_tot_1 = 0; // size_t mem_free_1 = 0; // // out of mem // hipSetDevice(0); // hipDeviceSynchronize(); // hipDeviceSynchronize(); hipDeviceReset(); hipMemGetInfo(&mem_free_0, &mem_tot_0); std::cout << "Free memory before copy dev 0: " << mem_free_0 / 1024 / 1024 << std::endl; // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up data size of matrix int nx = 1 << 12; //14; int ny = 1 << 12; // 14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d nMBytes %d\n", nx, ny, to_mb(nBytes)); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); // initialize data at host side double iStart = seconds(); initialData(h_A, nxy); initialData(h_B, nxy); double iElaps = seconds() - iStart; printf("initialize matrix elapsed %f sec\n", iElaps); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result 
checks iStart = seconds(); sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); iElaps = seconds() - iStart; printf("sumMatrixOnHost elapsed %f sec\n", iElaps); // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; CHECK(hipMalloc((void ** )&d_MatA, nBytes)); CHECK(hipMalloc((void ** )&d_MatB, nBytes)); CHECK(hipMalloc((void ** )&d_MatC, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; dim3 block(dimx, 1); dim3 grid((nx + block.x - 1) / block.x, 1); iStart = seconds(); hipLaunchKernelGGL(( sumMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x, grid.y, block.x, block.y, iElaps); // check kernel error CHECK(hipGetLastError()); // copy kernel result back to host side CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory CHECK(hipFree(d_MatA)); CHECK(hipFree(d_MatB)); CHECK(hipFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device CHECK(hipDeviceReset()); return (0); }
20a737912dbd3a85a18f472009b1fdce4ad25cb7.cu
#include "external/book-professional-cuda-c-programming/CodeSamples/common/common.h" #include <cuda_runtime.h> #include <iostream> #include <stdio.h> static void info() { // http://developer.download.nvidia.com/compute/cuda/2_3/toolkit/docs/online/structcudaDeviceProp.html int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Total Global Memory (MBytes): %zu\n", prop.totalGlobalMem / 1024 / 1024); printf(" Warp Size: %d\n", prop.warpSize); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); } } /* * This example demonstrates a simple vector sum on the GPU and on the host. * sumArraysOnGPU splits the work of the vector sum across CUDA threads on the * GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially * iterates through vector elements on the host. */ void initialData(float *ip, const int size) { int i; for (i = 0; i < size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } return; } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 1D block 1D __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) for (int iy = 0; iy < ny; iy++) { int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } int to_mb(int bytes) { return bytes / 1024 / 1024; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); info(); size_t mem_tot_0 = 0; size_t mem_free_0 = 0; // size_t mem_tot_1 = 0; // size_t mem_free_1 = 0; // // out of mem // cudaSetDevice(0); // cudaDeviceSynchronize(); // cudaThreadSynchronize(); cudaDeviceReset(); cudaMemGetInfo(&mem_free_0, &mem_tot_0); std::cout << "Free memory before copy dev 0: " << mem_free_0 / 1024 / 1024 << std::endl; // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set up data size of matrix int nx = 1 << 12; //14; int ny = 1 << 12; // 14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d nMBytes %d\n", nx, ny, to_mb(nBytes)); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); // initialize data at host side double iStart = seconds(); initialData(h_A, nxy); initialData(h_B, nxy); double iElaps = seconds() - iStart; printf("initialize matrix elapsed %f sec\n", iElaps); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result checks iStart = seconds(); sumMatrixOnHost(h_A, h_B, 
hostRef, nx, ny); iElaps = seconds() - iStart; printf("sumMatrixOnHost elapsed %f sec\n", iElaps); // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; CHECK(cudaMalloc((void ** )&d_MatA, nBytes)); CHECK(cudaMalloc((void ** )&d_MatB, nBytes)); CHECK(cudaMalloc((void ** )&d_MatC, nBytes)); // transfer data from host to device CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int dimx = 32; dim3 block(dimx, 1); dim3 grid((nx + block.x - 1) / block.x, 1); iStart = seconds(); sumMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x, grid.y, block.x, block.y, iElaps); // check kernel error CHECK(cudaGetLastError()); // copy kernel result back to host side CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nxy); // free device global memory CHECK(cudaFree(d_MatA)); CHECK(cudaFree(d_MatB)); CHECK(cudaFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device CHECK(cudaDeviceReset()); return (0); }
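// ---------------------------------------------------------------------------
// Editor's note -- worked numbers for the launch above (hedged, illustrative).
// With nx = ny = 1 << 12 = 4096 and dimx = 32, the grid is
//   grid.x = (4096 + 32 - 1) / 32 = 128 blocks of 32 threads = 4096 threads,
// i.e. one thread per column, each looping over all 4096 rows inside
// sumMatrixOnGPU1D. Each matrix occupies 4096 * 4096 * 4 bytes = 64 MB, which
// is what the "nMBytes" printout reports; the three device matrices together
// need 192 MB.
// ---------------------------------------------------------------------------
static_assert((4096 + 32 - 1) / 32 == 128, "grid.x for nx = 4096, dimx = 32");
static_assert(4096ULL * 4096ULL * sizeof(float) == 64ULL * 1024ULL * 1024ULL,
              "one 4096 x 4096 float matrix is 64 MB");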
e5a197a50b50e315bfb86914610004d9b8850320.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* // Cython function from 'thinc' library class NumpyOps(Ops): def mean_pool(self, float[:, ::1] X, int[::1] lengths): cdef int B = lengths.shape[0] cdef int O = X.shape[1] cdef int T = X.shape[0] cdef Pool mem = Pool() means = <float*>mem.alloc(B * O, sizeof(float)) cpu_mean_pool(means, &X[0, 0], &lengths[0], B, T, O) return cpu_floats_ptr2array(means, (B, O)) cdef void cpu_mean_pool(float* means__bo, const float* X__to, const int* lengths__b, int B, int T, int O) nogil: '''Compute means of a batch of concatenated sequences, using the lengths.''' cdef float scale = 0. for length in lengths__b[:B]: scale = 1. / length for _ in range(length): VecVec.add_i(means__bo, X__to, scale, O) X__to += O means__bo += O */ // hardcoded the shared memory to 256 but we can easily change the host to invoke // the kernel to dynamically allocate the shared memory (according to vector dimensions) void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims) { int bid = blockIdx.x; __shared__ float local_means[256]; for(int step = bid; step < numdocs; step += gridDim.x ) { int wordsInDoc = lengths[step]; int blockStarts = prevLengths[step]*dims; local_means[threadIdx.x] = 0.0; for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims) local_means[threadIdx.x] += words[i]; __syncthreads(); means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc; } }
e5a197a50b50e315bfb86914610004d9b8850320.cu
/* // Cython function from 'thinc' library class NumpyOps(Ops): def mean_pool(self, float[:, ::1] X, int[::1] lengths): cdef int B = lengths.shape[0] cdef int O = X.shape[1] cdef int T = X.shape[0] cdef Pool mem = Pool() means = <float*>mem.alloc(B * O, sizeof(float)) cpu_mean_pool(means, &X[0, 0], &lengths[0], B, T, O) return cpu_floats_ptr2array(means, (B, O)) cdef void cpu_mean_pool(float* means__bo, const float* X__to, const int* lengths__b, int B, int T, int O) nogil: '''Compute means of a batch of concatenated sequences, using the lengths.''' cdef float scale = 0. for length in lengths__b[:B]: scale = 1. / length for _ in range(length): VecVec.add_i(means__bo, X__to, scale, O) X__to += O means__bo += O */ // hardcoded the shared memory to 256 but we can easily change the host to invoke // the kernel to dynamically allocate the shared memory (according to vector dimensions) void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims) { int bid = blockIdx.x; __shared__ float local_means[256]; for(int step = bid; step < numdocs; step += gridDim.x ) { int wordsInDoc = lengths[step]; int blockStarts = prevLengths[step]*dims; local_means[threadIdx.x] = 0.0; for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims) local_means[threadIdx.x] += words[i]; __syncthreads(); means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc; } }
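// ---------------------------------------------------------------------------
// Editor's note -- a hedged launch sketch (not part of the file pair above).
// The kernel assumes blockDim.x == dims (with dims <= 256, the hard-coded
// shared-memory size) and that prevLengths[d] is the exclusive prefix sum of
// lengths, i.e. the index of document d's first word vector inside `words`.
// Because the kernel uses a grid-stride loop over documents, any grid size
// works; capping it at 1024 blocks here is an illustrative choice.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void mean_pool(float *means, float *words, int *lengths,
                          int *prevLengths, int numdocs, int dims);

void launch_mean_pool(float *means_dev, float *words_dev, int *lengths_dev,
                      int *prev_lengths_dev, int numdocs, int dims) {
    if (numdocs <= 0 || dims <= 0 || dims > 256) return;  // kernel's limits
    int blocks = numdocs < 1024 ? numdocs : 1024;  // one block handles >= 1 doc
    mean_pool<<<blocks, dims>>>(means_dev, words_dev, lengths_dev,
                                prev_lengths_dev, numdocs, dims);
    cudaDeviceSynchronize();
}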
678ef0b6c4ae7857c4c68126aaee4fa17a85e4a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "hipcub/hipcub.hpp" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/moderngpu_allocator.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" #include "moderngpu/kernel_load_balance.hxx" #include "moderngpu/kernel_sortedsearch.hxx" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { #if 1 mgpu::context_t *mgpu_allocator = GetModernGpuAllocator(c); mgpu::load_balance_search(num_elems, row_splits, num_rows, row_ids, *mgpu_allocator); #elif 0 auto lambda_set_minus_1 = [=] __device__(int32_t i) -> void { row_ids[i] = -1; }; EvalDevice(c, num_elems, lambda_set_minus_1); auto lambda_set_row_ids_start = [=] __device__(int32_t i) -> void { if (row_splits[i + 1] > row_splits[i]) row_ids[row_splits[i]] = i; }; EvalDevice(c, num_rows, lambda_set_row_ids_start); size_t temp_storage_bytes; hipcub::DeviceScan::InclusiveScan(nullptr, temp_storage_bytes, row_ids, row_ids, MaxOp<int32_t>(), num_elems, c->GetCudaStream()); Array1<int8_t> d_temp_storage(c, temp_storage_bytes); hipcub::DeviceScan::InclusiveScan(d_temp_storage.Data(), temp_storage_bytes, row_ids, row_ids, MaxOp<int32_t>(), num_elems, c->GetCudaStream()); #else // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems, row_ids)); #endif } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) K2_EVAL( c, num_elems + 1, lambda_init_minus_one, (int32_t i)->void { row_ids[i] = -1; }); K2_EVAL( c, num_elems + 1, lambda_phase_one, (int32_t i)->void { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... 
row_ids[next_row_split-1] with the // same value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < // next_row_split such that i is the result of rounding up // this_row_split to (something)*2^n, for n = 1, 2, 3, ... this will // take time logarithmic in (next_row_split - this_row_split). we // can then fill in the gaps with a logarithmic-time loop, by // looking for a value that's not (-1) by rounding the current index // down to successively higher powers of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j // + (1<<power) < next_row_split" in the loop condition. Note, // we don't want a loop-within-a-loop because of how SIMT // works... row_ids[j] = i; } } }); // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. K2_EVAL( c, num_elems, lambda_phase_two, (int32_t j)->void { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { K2_EVAL( c, num_rows + 1, lambda_set_values, (int32_t i)->void { row_splits[i] = 0; }); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); #if 1 // moderngpu is faster auto lambda_set_row_splits = [=] __device__(int32_t i) { if (i == num_rows) row_splits[i] = num_elems; else row_splits[i] = i; }; EvalDevice(c, num_rows + 1, lambda_set_row_splits); mgpu::context_t *mgpu_allocator = GetModernGpuAllocator(c); mgpu::sorted_search<mgpu::bounds_lower>( row_splits, num_rows, row_ids, num_elems, row_splits, LessThan<int32_t>(), *mgpu_allocator); #elif 0 Array1<int32_t> counts = GetCounts(c, row_ids, num_elems, num_rows + 1); ExclusiveSum(c, num_rows + 1, counts.Data(), row_splits); #else if (no_empty_rows) { auto lambda_simple = [=] __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; EvalDevice(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } #endif } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. 
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. 
if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. 
int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(hipStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (::min(next_row_split / dart_separation, num_tasks) - ::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task>) , dim3(grid_size), dim3(block_size), 0, stream, num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
678ef0b6c4ae7857c4c68126aaee4fa17a85e4a8.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "cub/cub.cuh" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/moderngpu_allocator.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" #include "moderngpu/kernel_load_balance.hxx" #include "moderngpu/kernel_sortedsearch.hxx" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { #if 1 mgpu::context_t *mgpu_allocator = GetModernGpuAllocator(c); mgpu::load_balance_search(num_elems, row_splits, num_rows, row_ids, *mgpu_allocator); #elif 0 auto lambda_set_minus_1 = [=] __device__(int32_t i) -> void { row_ids[i] = -1; }; EvalDevice(c, num_elems, lambda_set_minus_1); auto lambda_set_row_ids_start = [=] __device__(int32_t i) -> void { if (row_splits[i + 1] > row_splits[i]) row_ids[row_splits[i]] = i; }; EvalDevice(c, num_rows, lambda_set_row_ids_start); size_t temp_storage_bytes; cub::DeviceScan::InclusiveScan(nullptr, temp_storage_bytes, row_ids, row_ids, MaxOp<int32_t>(), num_elems, c->GetCudaStream()); Array1<int8_t> d_temp_storage(c, temp_storage_bytes); cub::DeviceScan::InclusiveScan(d_temp_storage.Data(), temp_storage_bytes, row_ids, row_ids, MaxOp<int32_t>(), num_elems, c->GetCudaStream()); #else // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); #endif } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) K2_EVAL( c, num_elems + 1, lambda_init_minus_one, (int32_t i)->void { row_ids[i] = -1; }); K2_EVAL( c, num_elems + 1, lambda_phase_one, (int32_t i)->void { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... 
row_ids[next_row_split-1] with the // same value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < // next_row_split such that i is the result of rounding up // this_row_split to (something)*2^n, for n = 1, 2, 3, ... this will // take time logarithmic in (next_row_split - this_row_split). we // can then fill in the gaps with a logarithmic-time loop, by // looking for a value that's not (-1) by rounding the current index // down to successively higher powers of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j // + (1<<power) < next_row_split" in the loop condition. Note, // we don't want a loop-within-a-loop because of how SIMT // works... row_ids[j] = i; } } }); // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. K2_EVAL( c, num_elems, lambda_phase_two, (int32_t j)->void { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { K2_EVAL( c, num_rows + 1, lambda_set_values, (int32_t i)->void { row_splits[i] = 0; }); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); #if 1 // moderngpu is faster auto lambda_set_row_splits = [=] __device__(int32_t i) { if (i == num_rows) row_splits[i] = num_elems; else row_splits[i] = i; }; EvalDevice(c, num_rows + 1, lambda_set_row_splits); mgpu::context_t *mgpu_allocator = GetModernGpuAllocator(c); mgpu::sorted_search<mgpu::bounds_lower>( row_splits, num_rows, row_ids, num_elems, row_splits, LessThan<int32_t>(), *mgpu_allocator); #elif 0 Array1<int32_t> counts = GetCounts(c, row_ids, num_elems, num_rows + 1); ExclusiveSum(c, num_rows + 1, counts.Data(), row_splits); #else if (no_empty_rows) { auto lambda_simple = [=] __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; EvalDevice(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } #endif } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. 
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. 
if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. 
int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (std::min(next_row_split / dart_separation, num_tasks) - std::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
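RowSplitsToRowIds() and RowIdsToRowSplits() above are inverse conversions between the two standard encodings of a ragged shape, and their kCpu branches give the reference semantics. The following small illustration uses a toy shape (the values are hypothetical) and the same loops as those branches.

// Sketch only: row_splits = {0, 2, 2, 5} describes 3 rows holding 2, 0 and 3
// elements; row_ids lists, for every element, the row it belongs to.
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t num_rows = 3, num_elems = 5;
  const int32_t row_splits[num_rows + 1] = {0, 2, 2, 5};
  int32_t row_ids[num_elems];

  // Forward direction, as in the kCpu branch of RowSplitsToRowIds():
  // every element in [row_splits[r], row_splits[r + 1]) gets row id r.
  for (int32_t row = 0, cur = row_splits[0]; row < num_rows; ++row)
    for (; cur < row_splits[row + 1]; ++cur) row_ids[cur] = row;
  // row_ids is now {0, 0, 2, 2, 2}; the empty row 1 contributes nothing.

  // Inverse direction, as in the kCpu branch of RowIdsToRowSplits():
  // row_splits[r] is the index of the first element whose (non-decreasing)
  // row id is >= r, and row_splits[num_rows] == num_elems.
  int32_t back[num_rows + 1];
  int32_t cur_row = -1;
  for (int32_t i = 0; i < num_elems; ++i)
    while (cur_row < row_ids[i]) back[++cur_row] = i;
  while (cur_row < num_rows) back[++cur_row] = num_elems;
  for (int32_t i = 0; i <= num_rows; ++i) std::printf("%d ", back[i]);  // 0 2 2 5
  std::printf("\n");
  return 0;
}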
6adb3a6bea5f672bce01cc9eb3aa13baabee2c60.hip
// !!! This is a file automatically generated by hipify!!! #include "cp.h" #include <hip/hip_runtime.h> #include <cmath> #include <stdio.h> #include <iostream> #define CHECK_CUDA_ERROR(call) do { \ hipError_t result_ = (call); \ if (result_ != hipSuccess) { \ fprintf(stderr, #call " failed: %s\n", \ hipGetErrorString(result_)); \ exit(1); \ } \ } while(0) double get_mean(const float* row, int nx) { double sum = 0.0, mean; for (int x=0; x<nx; x++) { sum += (double) row[x]; } mean = sum / (double) nx; return mean; } double get_root_square_sum(double* row, int nx) { double square_sum = 0, root_square_sum; for (int x=0; x<nx; x++) { square_sum += pow((double) row[x], 2.0); } root_square_sum = std::sqrt(square_sum); return root_square_sum; } __global__ void matrix_multiply(int nx, int ny, double* X, float* result) { double r = 0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < y || x >= ny || y >= ny) return; for (int i=0; i<nx; i++) { r += X[x*nx + i] * X[y*nx + i]; } result[y*ny + x] = r; } void correlate(int ny, int nx, const float* data, float* result) { double row_mean, row_rss; double* X = (double*) malloc(sizeof(double) * nx * ny); double* dev_X; float* dev_result; for (int y=0; y<ny; y++) { row_mean = get_mean(&data[y*nx], nx); for (int x=0; x<nx; x++) { X[y*nx + x] = ((double) data[y*nx + x]) - row_mean; } row_rss = get_root_square_sum(&X[y*nx], nx); for (int x=0; x<nx; x++) { X[y*nx + x] = X[y*nx + x] / row_rss; } } // Allocate GPU memory for X CHECK_CUDA_ERROR( hipMalloc( (void**)&dev_X, ny * nx * sizeof(double) ) ); CHECK_CUDA_ERROR( hipMalloc( (void**)&dev_result, ny * ny * sizeof(float) ) ); // Copy X to the GPU memory. CHECK_CUDA_ERROR( hipMemcpy(dev_X, X, sizeof(double)*nx*ny, hipMemcpyHostToDevice) ); dim3 szBlock(8, 8); dim3 szGrid((ny + szBlock.x - 1) / szBlock.x, (ny + szBlock.y - 1) / szBlock.y); hipLaunchKernelGGL(( matrix_multiply), dim3(szGrid), dim3(szBlock), 0, 0, nx, ny, dev_X, dev_result); CHECK_CUDA_ERROR(hipGetLastError()); // Copy result back to the CPU memory CHECK_CUDA_ERROR( hipMemcpy(result, dev_result, sizeof(float)*ny*ny, hipMemcpyDeviceToHost) ); free(X); hipFree(dev_X); hipFree(dev_result); }
6adb3a6bea5f672bce01cc9eb3aa13baabee2c60.cu
#include "cp.h" #include <cuda_runtime.h> #include <cmath> #include <stdio.h> #include <iostream> #define CHECK_CUDA_ERROR(call) do { \ cudaError_t result_ = (call); \ if (result_ != cudaSuccess) { \ fprintf(stderr, #call " failed: %s\n", \ cudaGetErrorString(result_)); \ exit(1); \ } \ } while(0) double get_mean(const float* row, int nx) { double sum = 0.0, mean; for (int x=0; x<nx; x++) { sum += (double) row[x]; } mean = sum / (double) nx; return mean; } double get_root_square_sum(double* row, int nx) { double square_sum = 0, root_square_sum; for (int x=0; x<nx; x++) { square_sum += pow((double) row[x], 2.0); } root_square_sum = std::sqrt(square_sum); return root_square_sum; } __global__ void matrix_multiply(int nx, int ny, double* X, float* result) { double r = 0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < y || x >= ny || y >= ny) return; for (int i=0; i<nx; i++) { r += X[x*nx + i] * X[y*nx + i]; } result[y*ny + x] = r; } void correlate(int ny, int nx, const float* data, float* result) { double row_mean, row_rss; double* X = (double*) malloc(sizeof(double) * nx * ny); double* dev_X; float* dev_result; for (int y=0; y<ny; y++) { row_mean = get_mean(&data[y*nx], nx); for (int x=0; x<nx; x++) { X[y*nx + x] = ((double) data[y*nx + x]) - row_mean; } row_rss = get_root_square_sum(&X[y*nx], nx); for (int x=0; x<nx; x++) { X[y*nx + x] = X[y*nx + x] / row_rss; } } // Allocate GPU memory for X CHECK_CUDA_ERROR( cudaMalloc( (void**)&dev_X, ny * nx * sizeof(double) ) ); CHECK_CUDA_ERROR( cudaMalloc( (void**)&dev_result, ny * ny * sizeof(float) ) ); // Copy X to the GPU memory. CHECK_CUDA_ERROR( cudaMemcpy(dev_X, X, sizeof(double)*nx*ny, cudaMemcpyHostToDevice) ); dim3 szBlock(8, 8); dim3 szGrid((ny + szBlock.x - 1) / szBlock.x, (ny + szBlock.y - 1) / szBlock.y); matrix_multiply<<<szGrid, szBlock>>>(nx, ny, dev_X, dev_result); CHECK_CUDA_ERROR(cudaGetLastError()); // Copy result back to the CPU memory CHECK_CUDA_ERROR( cudaMemcpy(result, dev_result, sizeof(float)*ny*ny, cudaMemcpyDeviceToHost) ); free(X); cudaFree(dev_X); cudaFree(dev_result); }
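correlate() above normalises each row of the ny-by-nx input to zero mean and unit Euclidean norm, then has the GPU form the ny-by-ny matrix of pairwise dot products, i.e. Pearson correlation coefficients, writing only entries with x >= y (the kernel returns early for x < y). A hedged usage sketch follows; the prototype is assumed from the definition above, since cp.h is not shown here.

// Sketch only: calling correlate() on a tiny 2 x 3 input.
#include <cstdio>
#include <vector>

// Assumed to match the definition above (normally provided by cp.h).
void correlate(int ny, int nx, const float* data, float* result);

int main() {
  const int ny = 2, nx = 3;
  // Row 1 is an affine function of row 0, so their correlation should be ~1.
  std::vector<float> data = {1.0f, 2.0f, 3.0f,
                             2.0f, 4.0f, 6.0f};
  // Only entries with x >= y are written by the kernel; the others come back
  // uninitialised because dev_result is never cleared on the device.
  std::vector<float> result(ny * ny, 0.0f);
  correlate(ny, nx, data.data(), result.data());
  std::printf("corr(row0, row0) = %f\n", result[0 * ny + 0]);  // ~1.0
  std::printf("corr(row0, row1) = %f\n", result[0 * ny + 1]);  // ~1.0
  return 0;
}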
d4d0056dd3b4555ee2ebc91eb008d6b6a7acfc45.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <chrono> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" ////////////////////////////// // forward ////////////////////////////// __global__ void kernal_fp32_BinaryToReal_Forward( const float* x_buf, float* y_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float sum = 0; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { float x = x_buf[x_node * x_frame_stride + x_frame + frame]; sum += x; } } y_buf[y_node * y_frame_stride + y_frame] = sum * gain; } BBCU_DLL_EXPORT int bbcu_fp32_BinaryToReal_Forward ( const float* dev_x_buf, float* dev_y_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; hipLaunchKernelGGL(( kernal_fp32_BinaryToReal_Forward), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } //////////////// __global__ void kernal_bit_fp32_BinaryToReal_Forward( int const *x_buf, float *y_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float sum = 0; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { int x_unit = ((x_frame + frame) >> 5); int x_bit = ((x_frame + frame) & 0x1f); int x_bit_mask = (1 << x_bit); int x = x_buf[x_node * x_frame_stride + x_unit]; if ( x & x_bit_mask ) { sum += 1.0; } } } y_buf[y_node * y_frame_stride + y_frame] = sum * gain; } BBCU_DLL_EXPORT int bbcu_bit_fp32_BinaryToReal_Forward ( int const *dev_x_buf, float *dev_y_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; hipLaunchKernelGGL(( kernal_bit_fp32_BinaryToReal_Forward), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, 
frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // backward ////////////////////////////// __global__ void kernal_fp32_BinaryToReal_Backward( const float* dy_buf, float* dx_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float dy = dy_buf[y_node * y_frame_stride + y_frame]; float val = dy * gain; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { dx_buf[x_node * x_frame_stride + x_frame + frame] = val; } } } BBCU_DLL_EXPORT int bbcu_fp32_BinaryToReal_Backward ( const float* dev_dy_buf, float* dev_dx_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; hipLaunchKernelGGL(( kernal_fp32_BinaryToReal_Backward), dim3(grid), dim3(block), 0, streamId, dev_dy_buf, dev_dx_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
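In kernal_bit_fp32_BinaryToReal_Forward() above, each 32-bit word of x_buf packs 32 consecutive frames, so a frame index is split into a word index (frame >> 5) and a bit position (frame & 0x1f). A tiny worked illustration of that indexing, independent of the bbcu buffer layout:

// Sketch only: how the bit-packed forward kernel locates one frame's bit.
#include <cstdio>

int main() {
  int frame = 37;               // an arbitrary frame index
  int x_unit = frame >> 5;      // 37 / 32 -> word 1
  int x_bit = frame & 0x1f;     // 37 % 32 -> bit 5
  int x_bit_mask = 1 << x_bit;  // 0x20
  std::printf("frame %d -> word %d, bit %d, mask 0x%x\n",
              frame, x_unit, x_bit, x_bit_mask);
  // With x_frame_stride counted in 32-bit words, the kernel then tests
  // x_buf[x_node * x_frame_stride + x_unit] & x_bit_mask.
  return 0;
}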
d4d0056dd3b4555ee2ebc91eb008d6b6a7acfc45.cu
#include <iostream> #include <chrono> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" ////////////////////////////// // forward ////////////////////////////// __global__ void kernal_fp32_BinaryToReal_Forward( const float* x_buf, float* y_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float sum = 0; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { float x = x_buf[x_node * x_frame_stride + x_frame + frame]; sum += x; } } y_buf[y_node * y_frame_stride + y_frame] = sum * gain; } BBCU_DLL_EXPORT int bbcu_fp32_BinaryToReal_Forward ( const float* dev_x_buf, float* dev_y_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; kernal_fp32_BinaryToReal_Forward<<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } //////////////// __global__ void kernal_bit_fp32_BinaryToReal_Forward( int const *x_buf, float *y_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float sum = 0; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { int x_unit = ((x_frame + frame) >> 5); int x_bit = ((x_frame + frame) & 0x1f); int x_bit_mask = (1 << x_bit); int x = x_buf[x_node * x_frame_stride + x_unit]; if ( x & x_bit_mask ) { sum += 1.0; } } } y_buf[y_node * y_frame_stride + y_frame] = sum * gain; } BBCU_DLL_EXPORT int bbcu_bit_fp32_BinaryToReal_Forward ( int const *dev_x_buf, float *dev_y_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; kernal_bit_fp32_BinaryToReal_Forward<<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } 
////////////////////////////// // backward ////////////////////////////// __global__ void kernal_fp32_BinaryToReal_Backward( const float* dy_buf, float* dx_buf, float gain, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride ) { int y_frame = blockDim.x * blockIdx.x + threadIdx.x; int y_node = blockDim.y * blockIdx.y + threadIdx.y; if (y_frame >= y_frame_size || y_node >= y_node_size) { return; } float dy = dy_buf[y_node * y_frame_stride + y_frame]; float val = dy * gain; int x_frame = y_frame * frame_mux_size; for ( int node = 0; node < node_mux_size; ++node ) { int x_node = y_node_size * node + y_node; for ( int frame = 0; frame < frame_mux_size; ++frame ) { dx_buf[x_node * x_frame_stride + x_frame + frame] = val; } } } BBCU_DLL_EXPORT int bbcu_fp32_BinaryToReal_Backward ( const float* dev_dy_buf, float* dev_dx_buf, int node_mux_size, int frame_mux_size, int y_node_size, int x_frame_stride, int y_frame_size, int y_frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 block(y_frame_size, y_node_size); dim3 grid(1, 1); while ( block.y > 1 && block.x * block.y > 1024 ) { block.y = (block.y + 1) / 2; } grid.y = (y_node_size + (block.y - 1)) / block.y; while ( block.x > 1 && block.x * block.y > 1024 ) { block.x = (block.x + 1) / 2; } grid.x = (y_frame_size + (block.x - 1)) / block.x; kernal_fp32_BinaryToReal_Backward<<<grid, block, 0, streamId>>>( dev_dy_buf, dev_dx_buf, 1.0f / (node_mux_size * frame_mux_size), node_mux_size, frame_mux_size, y_node_size, x_frame_stride, y_frame_size, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
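All three launchers in the file above size their launch the same way: start from a (y_frame_size, y_node_size) block, halve block.y and then block.x until the block fits under 1024 threads, and grow the grid to cover whatever was halved away. A standalone sketch of that sizing loop with illustrative sizes:

// Sketch only: reproduces the block/grid sizing heuristic used by the
// BinaryToReal launchers above; Dim3 stands in for CUDA's dim3 so this
// compiles as plain host C++.
#include <cstdio>

struct Dim3 { unsigned x = 1, y = 1, z = 1; };

int main() {
  int y_frame_size = 4096, y_node_size = 100;  // illustrative sizes
  Dim3 block, grid;
  block.x = y_frame_size;
  block.y = y_node_size;
  while (block.y > 1 && block.x * block.y > 1024) { block.y = (block.y + 1) / 2; }
  grid.y = (y_node_size + (block.y - 1)) / block.y;
  while (block.x > 1 && block.x * block.y > 1024) { block.x = (block.x + 1) / 2; }
  grid.x = (y_frame_size + (block.x - 1)) / block.x;
  std::printf("block = (%u, %u), grid = (%u, %u)\n", block.x, block.y, grid.x, grid.y);
  // For these sizes block.y collapses to 1 and block.x halves down to 1024,
  // giving block = (1024, 1) and grid = (4, 100).
  return 0;
}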
b0e6f8bb876e1ffcb76ab81eb7081e03e4dbf125.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //n<=4096, m<=1024 __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match){ const int MaxN=4096,MaxM=1024; __shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN]; __shared__ int listR[MaxM],lc; float multiL,multiR; if (n>=m){ multiL=1; multiR=n/m; }else{ multiL=m/n; multiR=1; } for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x;j<n*m;j+=blockDim.x) match[i*n*m+j]=0; for (int j=threadIdx.x;j<n;j+=blockDim.x) remainL[j]=multiL; for (int j=threadIdx.x;j<m;j+=blockDim.x) remainR[j]=multiR; __syncthreads(); for (int j=7;j>=-2;j--){ float level=-powf(4.0f,j); if (j==-2){ level=0; } if (threadIdx.x==0){ lc=0; for (int k=0;k<m;k++) if (remainR[k]>0) listR[lc++]=k; } __syncthreads(); int _lc=lc; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float suml=1e-9f; float x1=xyz1[(i*n+k)*3+0]; float y1=xyz1[(i*n+k)*3+1]; float z1=xyz1[(i*n+k)*3+2]; //for (int l=0;l<m;l++){ for (int _l=0;_l<_lc;_l++){ int l=listR[_l]; float x2=xyz2[(i*m+l)*3+0]-x1; float y2=xyz2[(i*m+l)*3+1]-y1; float z2=xyz2[(i*m+l)*3+2]-z1; float w=expf(level*(x2*x2+y2*y2+z2*z2))*remainR[l]; suml+=w; } ratioL[k]=remainL[k]/suml; } __syncthreads(); //for (int k=threadIdx.x;k<m;k+=blockDim.x){ for (int _k=threadIdx.x;_k<lc;_k+=blockDim.x){ int k=listR[_k]; float sumr=0; float x2=xyz2[(i*m+k)*3+0]; float y2=xyz2[(i*m+k)*3+1]; float z2=xyz2[(i*m+k)*3+2]; for (int l=0;l<n;l++){ float x1=xyz1[(i*n+l)*3+0]-x2; float y1=xyz1[(i*n+l)*3+1]-y2; float z1=xyz1[(i*n+l)*3+2]-z2; float w=expf(level*(x1*x1+y1*y1+z1*z1))*ratioL[l]; sumr+=w; } sumr*=remainR[k]; float consumption=fminf(remainR[k]/(sumr+1e-9f),1.0f); ratioR[k]=consumption*remainR[k]; remainR[k]=fmaxf(0.0f,remainR[k]-sumr); } __syncthreads(); for (int k=threadIdx.x;k<n;k+=blockDim.x){ float suml=0; float x1=xyz1[(i*n+k)*3+0]; float y1=xyz1[(i*n+k)*3+1]; float z1=xyz1[(i*n+k)*3+2]; for (int _l=0;_l<_lc;_l++){ int l=listR[_l]; float x2=xyz2[(i*m+l)*3+0]-x1; float y2=xyz2[(i*m+l)*3+1]-y1; float z2=xyz2[(i*m+l)*3+2]-z1; float w=expf(level*(x2*x2+y2*y2+z2*z2))*ratioL[k]*ratioR[l]; match[i*n*m+l*n+k]+=w; suml+=w; } remainL[k]=fmaxf(0.0f,remainL[k]-suml); } __syncthreads(); } } } void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){ hipLaunchKernelGGL(( approxmatch), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match); } __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ __shared__ float allsum[512]; const int Block=256; __shared__ float buf[Block*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ float subsum=0; for (int k0=0;k0<m;k0+=Block){ int endk=min(m,k0+Block); for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){ buf[k]=xyz2[i*m*3+k0*3+k]; } __syncthreads(); for (int j=threadIdx.x;j<n;j+=blockDim.x){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; for (int k=0;k<endk-k0;k++){ //float x2=xyz2[(i*m+k)*3+0]-x1; //float y2=xyz2[(i*m+k)*3+1]-y1; //float z2=xyz2[(i*m+k)*3+2]-z1; float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=sqrtf(x2*x2+y2*y2+z2*z2); subsum+=match[i*n*m+(k0+k)*n+j]*d; } } __syncthreads(); } allsum[threadIdx.x]=subsum; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){ allsum[threadIdx.x]+=allsum[threadIdx.x+j]; } } if 
(threadIdx.x==0) out[i]=allsum[0]; __syncthreads(); } } void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){ hipLaunchKernelGGL(( matchcost), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,out); } __global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){ __shared__ float sum_grad[256*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int kbeg=m*blockIdx.y/gridDim.y; int kend=m*(blockIdx.y+1)/gridDim.y; for (int k=kbeg;k<kend;k++){ float x2=xyz2[(i*m+k)*3+0]; float y2=xyz2[(i*m+k)*3+1]; float z2=xyz2[(i*m+k)*3+2]; float subsumx=0,subsumy=0,subsumz=0; for (int j=threadIdx.x;j<n;j+=blockDim.x){ float x1=x2-xyz1[(i*n+j)*3+0]; float y1=y2-xyz1[(i*n+j)*3+1]; float z1=z2-xyz1[(i*n+j)*3+2]; float d=match[i*n*m+k*n+j]/fmaxf(sqrtf(x1*x1+y1*y1+z1*z1),1e-20f); subsumx+=x1*d; subsumy+=y1*d; subsumz+=z1*d; } sum_grad[threadIdx.x*3+0]=subsumx; sum_grad[threadIdx.x*3+1]=subsumy; sum_grad[threadIdx.x*3+2]=subsumz; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); int j1=threadIdx.x; int j2=threadIdx.x+j; if ((j1&j)==0 && j2<blockDim.x){ sum_grad[j1*3+0]+=sum_grad[j2*3+0]; sum_grad[j1*3+1]+=sum_grad[j2*3+1]; sum_grad[j1*3+2]+=sum_grad[j2*3+2]; } } if (threadIdx.x==0){ grad2[(i*m+k)*3+0]=sum_grad[0]; grad2[(i*m+k)*3+1]=sum_grad[1]; grad2[(i*m+k)*3+2]=sum_grad[2]; } __syncthreads(); } } } void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){ hipLaunchKernelGGL(( matchcostgrad), dim3(dim3(32,32)),dim3(256), 0, 0, b,n,m,xyz1,xyz2,match,grad2); }
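The outer loop of approxmatch() above anneals the matching: the weight for pairing two points at squared distance d2 is expf(level * d2) times the remaining capacity, with level = -powf(4, j) for j from 7 down to -2 and level forced to 0 on the final (j = -2) pass. One way to read the schedule is that early passes only reward near-coincident points while the last pass distributes whatever capacity remains regardless of distance; the small sketch below just prints the schedule.

// Sketch only: the annealing schedule used by approxmatch() above.
#include <cmath>
#include <cstdio>

int main() {
  for (int j = 7; j >= -2; j--) {
    float level = -powf(4.0f, (float)j);
    if (j == -2) level = 0.0f;  // final pass ignores distance entirely
    std::printf("j = %2d  level = %g\n", j, level);
  }
  // j = 7 gives level = -16384 (extremely sharp), j = -1 gives -0.25,
  // and the forced 0 at j = -2 flattens the weights completely.
  return 0;
}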
b0e6f8bb876e1ffcb76ab81eb7081e03e4dbf125.cu
//n<=4096, m<=1024 __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match){ const int MaxN=4096,MaxM=1024; __shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN]; __shared__ int listR[MaxM],lc; float multiL,multiR; if (n>=m){ multiL=1; multiR=n/m; }else{ multiL=m/n; multiR=1; } for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x;j<n*m;j+=blockDim.x) match[i*n*m+j]=0; for (int j=threadIdx.x;j<n;j+=blockDim.x) remainL[j]=multiL; for (int j=threadIdx.x;j<m;j+=blockDim.x) remainR[j]=multiR; __syncthreads(); for (int j=7;j>=-2;j--){ float level=-powf(4.0f,j); if (j==-2){ level=0; } if (threadIdx.x==0){ lc=0; for (int k=0;k<m;k++) if (remainR[k]>0) listR[lc++]=k; } __syncthreads(); int _lc=lc; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float suml=1e-9f; float x1=xyz1[(i*n+k)*3+0]; float y1=xyz1[(i*n+k)*3+1]; float z1=xyz1[(i*n+k)*3+2]; //for (int l=0;l<m;l++){ for (int _l=0;_l<_lc;_l++){ int l=listR[_l]; float x2=xyz2[(i*m+l)*3+0]-x1; float y2=xyz2[(i*m+l)*3+1]-y1; float z2=xyz2[(i*m+l)*3+2]-z1; float w=expf(level*(x2*x2+y2*y2+z2*z2))*remainR[l]; suml+=w; } ratioL[k]=remainL[k]/suml; } __syncthreads(); //for (int k=threadIdx.x;k<m;k+=blockDim.x){ for (int _k=threadIdx.x;_k<lc;_k+=blockDim.x){ int k=listR[_k]; float sumr=0; float x2=xyz2[(i*m+k)*3+0]; float y2=xyz2[(i*m+k)*3+1]; float z2=xyz2[(i*m+k)*3+2]; for (int l=0;l<n;l++){ float x1=xyz1[(i*n+l)*3+0]-x2; float y1=xyz1[(i*n+l)*3+1]-y2; float z1=xyz1[(i*n+l)*3+2]-z2; float w=expf(level*(x1*x1+y1*y1+z1*z1))*ratioL[l]; sumr+=w; } sumr*=remainR[k]; float consumption=fminf(remainR[k]/(sumr+1e-9f),1.0f); ratioR[k]=consumption*remainR[k]; remainR[k]=fmaxf(0.0f,remainR[k]-sumr); } __syncthreads(); for (int k=threadIdx.x;k<n;k+=blockDim.x){ float suml=0; float x1=xyz1[(i*n+k)*3+0]; float y1=xyz1[(i*n+k)*3+1]; float z1=xyz1[(i*n+k)*3+2]; for (int _l=0;_l<_lc;_l++){ int l=listR[_l]; float x2=xyz2[(i*m+l)*3+0]-x1; float y2=xyz2[(i*m+l)*3+1]-y1; float z2=xyz2[(i*m+l)*3+2]-z1; float w=expf(level*(x2*x2+y2*y2+z2*z2))*ratioL[k]*ratioR[l]; match[i*n*m+l*n+k]+=w; suml+=w; } remainL[k]=fmaxf(0.0f,remainL[k]-suml); } __syncthreads(); } } } void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){ approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match); } __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ __shared__ float allsum[512]; const int Block=256; __shared__ float buf[Block*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ float subsum=0; for (int k0=0;k0<m;k0+=Block){ int endk=min(m,k0+Block); for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){ buf[k]=xyz2[i*m*3+k0*3+k]; } __syncthreads(); for (int j=threadIdx.x;j<n;j+=blockDim.x){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; for (int k=0;k<endk-k0;k++){ //float x2=xyz2[(i*m+k)*3+0]-x1; //float y2=xyz2[(i*m+k)*3+1]-y1; //float z2=xyz2[(i*m+k)*3+2]-z1; float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=sqrtf(x2*x2+y2*y2+z2*z2); subsum+=match[i*n*m+(k0+k)*n+j]*d; } } __syncthreads(); } allsum[threadIdx.x]=subsum; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){ allsum[threadIdx.x]+=allsum[threadIdx.x+j]; } } if (threadIdx.x==0) out[i]=allsum[0]; __syncthreads(); } } void matchcostLauncher(int b,int n,int m,const float * xyz1,const float 
* xyz2,const float * match,float * out){ matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out); } __global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){ __shared__ float sum_grad[256*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int kbeg=m*blockIdx.y/gridDim.y; int kend=m*(blockIdx.y+1)/gridDim.y; for (int k=kbeg;k<kend;k++){ float x2=xyz2[(i*m+k)*3+0]; float y2=xyz2[(i*m+k)*3+1]; float z2=xyz2[(i*m+k)*3+2]; float subsumx=0,subsumy=0,subsumz=0; for (int j=threadIdx.x;j<n;j+=blockDim.x){ float x1=x2-xyz1[(i*n+j)*3+0]; float y1=y2-xyz1[(i*n+j)*3+1]; float z1=z2-xyz1[(i*n+j)*3+2]; float d=match[i*n*m+k*n+j]/fmaxf(sqrtf(x1*x1+y1*y1+z1*z1),1e-20f); subsumx+=x1*d; subsumy+=y1*d; subsumz+=z1*d; } sum_grad[threadIdx.x*3+0]=subsumx; sum_grad[threadIdx.x*3+1]=subsumy; sum_grad[threadIdx.x*3+2]=subsumz; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); int j1=threadIdx.x; int j2=threadIdx.x+j; if ((j1&j)==0 && j2<blockDim.x){ sum_grad[j1*3+0]+=sum_grad[j2*3+0]; sum_grad[j1*3+1]+=sum_grad[j2*3+1]; sum_grad[j1*3+2]+=sum_grad[j2*3+2]; } } if (threadIdx.x==0){ grad2[(i*m+k)*3+0]=sum_grad[0]; grad2[(i*m+k)*3+1]=sum_grad[1]; grad2[(i*m+k)*3+2]=sum_grad[2]; } __syncthreads(); } } } void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){ matchcostgrad<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2); }
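Taken together, the launchers above implement an approximate Earth Mover's Distance style matching: approxmatchLauncher fills, for each batch element, an n*m matching matrix indexed as match[i*n*m + l*n + k] with l over the m points of xyz2 and k over the n points of xyz1; matchcostLauncher reduces it to one scalar cost per batch element; and matchcostgradLauncher produces the gradient with respect to the second point set. Below is a hedged host-side usage sketch; the prototypes are copied from the definitions above, and the sizes respect the n <= 4096, m <= 1024 limits noted in the kernel comment.

// Sketch only: driving the approxmatch/matchcost launchers defined above.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

void approxmatchLauncher(int b, int n, int m, const float* xyz1,
                         const float* xyz2, float* match);
void matchcostLauncher(int b, int n, int m, const float* xyz1,
                       const float* xyz2, const float* match, float* out);

int main() {
  const int b = 1, n = 128, m = 64;             // n <= 4096, m <= 1024
  std::vector<float> h_xyz1(b * n * 3, 0.0f);   // point set 1 at the origin
  std::vector<float> h_xyz2(b * m * 3, 0.5f);   // point set 2 at (0.5, 0.5, 0.5)
  std::vector<float> h_cost(b, 0.0f);

  float *d_xyz1, *d_xyz2, *d_match, *d_cost;
  cudaMalloc((void**)&d_xyz1, h_xyz1.size() * sizeof(float));
  cudaMalloc((void**)&d_xyz2, h_xyz2.size() * sizeof(float));
  cudaMalloc((void**)&d_match, (size_t)b * n * m * sizeof(float));
  cudaMalloc((void**)&d_cost, b * sizeof(float));
  cudaMemcpy(d_xyz1, h_xyz1.data(), h_xyz1.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_xyz2, h_xyz2.data(), h_xyz2.size() * sizeof(float), cudaMemcpyHostToDevice);

  approxmatchLauncher(b, n, m, d_xyz1, d_xyz2, d_match);
  matchcostLauncher(b, n, m, d_xyz1, d_xyz2, d_match, d_cost);
  cudaMemcpy(h_cost.data(), d_cost, b * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("approximate match cost[0] = %f\n", h_cost[0]);

  cudaFree(d_xyz1); cudaFree(d_xyz2); cudaFree(d_match); cudaFree(d_cost);
  return 0;
}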
084bc418589a65c33f2f94bdf53a0e706af5ed8a.hip
// !!! This is a file automatically generated by hipify!!! //use shared memory #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <chrono> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <ctime> #include <vector> using namespace std; #define MESH_W 4 #define MESH_H 4 #define M (MESH_W+1)*(MESH_H+1) //size of matrix A M by N #define N (MESH_W+1)*(MESH_H+1) #define NE 2*MESH_W*MESH_H //number of elements #define BLOCK_X 7 // number of integration points #define BLOCK_Y 9 // number of expressions #define BLOCK_Z ((int)(32*32)/(BLOCK_X*BLOCK_Y)) //number of elements in a block #define NDOF 3 //number of DOFs #define NNODE 3 //number of nodes __constant__ float triW[7] = { 0.06296959f, 0.06619708f, 0.06296959f, 0.06619708f, 0.06296959f, 0.06619708f, 0.11250000f }; __constant__ float triR[7] = { 0.10128651f, 0.47014206f, 0.79742699f, 0.47014206f, 0.10128651f, 0.05971587f, 0.33333333f }; __constant__ float triS[7] = { 0.10128651f, 0.05971587f, 0.10128651f, 0.47014206f, 0.79742699f, 0.47014206f, 0.33333333f }; __constant__ float triT[7] = { 0.79742698f, 0.47014207f, 0.1012865f, 0.05971588f, 0.1012865f, 0.47014207f, 0.33333334f }; class Node { public: double x, y, z; int flag; //boundary flag int index; //global index of node }; class Element { public: vector<Node*> nodes; }; class Mesh { public: vector<Node*> nodes; vector<Element*> elements; void printMesh() { cout << "number of nodes = " << nodes.size() << endl; for(int i=0; i<nodes.size(); i++) { Node *node = nodes[i]; cout << node->index << " " <<node->x << " " << node->y << " " << node->flag << endl; } cout << "number of elements = " <<elements.size() << endl; for(int i=0; i<elements.size(); i++) { Element *e = elements[i]; cout << e->nodes[0]->index << " " << e->nodes[1]->index << " " << e->nodes[2]->index << endl; } } }; class RectangleMesh : public Mesh { public: double x0,x1,y0,y1; int nRow, nCol; RectangleMesh(double x0, double x1, double y0, double y1, int nRow, int nCol) { this->x0 = x0; this->x1 = x1; this->y0 = y0; this->y1 = y1; this->nRow = nRow; this->nCol = nCol; generate(); } void generate() { double stepx = (x1-x0)/nCol; double stepy = (y1-y0)/nRow; //generate nodes for(int i=0; i<=nRow; i++) { double y = y0+i*stepy; for(int j=0; j<=nCol; j++) { double x = x0+j*stepx; Node *node = new Node(); node->x = x; node->y = y; if(i==0 || i==nRow || j==0 || j==nCol) node->flag = 1; //on the bounday else node->flag = 0; node->index = i*(nCol+1) + j; nodes.push_back(node); } } //generate elements for(int i=0; i<nRow; i++) { for(int j=0; j<nCol; j++) { Element *e = new Element(); int n1 = i*(nCol+1) + j; int n2 = n1 + 1; int n3 = (i+1)*(nCol+1) + j; e->nodes.push_back(nodes[n1]); e->nodes.push_back(nodes[n2]); e->nodes.push_back(nodes[n3]); elements.push_back(e); e = new Element(); n1 = i*(nCol+1) + j + 1; n2 = (i+1)*(nCol+1) + j+ 1; n3 = n2 - 1; e->nodes.push_back(nodes[n1]); e->nodes.push_back(nodes[n2]); e->nodes.push_back(nodes[n3]); elements.push_back(e); } } } }; class UnitSquareMesh : public RectangleMesh { public: UnitSquareMesh(int nRow, int nCol) : RectangleMesh(0.0,1.0,0.0,1.0,nRow,nCol) {} }; //This function should be generated from the symbol expressions of the integrand __device__ float integrand(int funIdx, float *params) { float x1 = params[0]; float x2 = params[1]; float x3 = params[2]; float y1 = params[3]; float y2 = params[4]; float y3 = params[5]; float r = params[6]; float s = params[7]; float t = params[8]; //printf("%d %f %f %f %f %f %f %f %f %f\n", 
funIdx, x1,x2,x3,y1,y2,y3,r,s,t); //x = x1*r + x2*s + x3*t //y = y1*r + y2*s + y3*t float xr = x1-x3; float xs = x2-x3; float yr = y1-y3; float ys = y2-y3; // jac = (xr xs) // (yr ys) float jac = xr*ys-xs*yr; float rx = ys/jac; float ry = -xs/jac; float sx = -yr/jac; float sy = xr/jac; float tx = -rx - sx; float ty = -ry - sy; // float[][] lhs = { // {rx*rx + ry*ry, rx*sx + ry*sy, rx*tx + ry*ty}, // {sx*rx + sy*ry, sx*sx + sy*sy, sx*tx + sy*ty}, // {tx*rx + ty*ry, tx*sx + ty*sy, tx*tx + ty*ty}, // }; if(funIdx == 0) return (rx*rx + ry*ry)*jac*0.5; if(funIdx == 1) return (rx*sx + ry*sy)*jac*0.5; if(funIdx == 2) return (rx*tx + ry*ty)*jac*0.5; if(funIdx == 3) return (sx*rx + sy*ry)*jac*0.5; if(funIdx == 4) return (sx*sx + sy*sy)*jac*0.5; if(funIdx == 5) return (sx*tx + sy*ty)*jac*0.5; if(funIdx == 6) return (tx*rx + ty*ry)*jac*0.5; if(funIdx == 7) return (tx*sx + ty*sy)*jac*0.5; if(funIdx == 8) return (tx*tx + ty*ty)*jac*0.5; return 0.0f; } //Version 3: use shared memory __global__ void fea_kernel(float* A, float *X, float *Y, // (x,y) of each element for all the element int *gIdx // node index of each element for all the element ) { __shared__ float sX[BLOCK_Z*NNODE]; //shared memory of copy of X in the current block __shared__ float sY[BLOCK_Z*NNODE]; //shared memory of copy of Y in the current block __shared__ float sGIdx[BLOCK_Z*NNODE]; //shared memory of copy of gIdx in the current block int gEleIdx = BLOCK_Z*blockIdx.x + threadIdx.z; //global element index int sEleIdx = NNODE*threadIdx.z; //global element index in shared memory arrays: sX,sY,sGIdx // copy from global memory to shared memory for X, Y and gIdx if(threadIdx.x==0 && threadIdx.y==0) { #pragma unroll for(int i=0; i<NNODE; i++) sX[sEleIdx+i]=X[NNODE*gEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) sY[sEleIdx+i]=Y[NNODE*gEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) sGIdx[sEleIdx+i]=gIdx[NNODE*gEleIdx+i]; } __syncthreads(); //local matrix row and column index //threadIdx.y = 0,1,2,3,4,5,6,7,8 (BLOCK_Y) int li = threadIdx.y / NDOF; int lj = threadIdx.y % NDOF; __shared__ float localFlatMatrix[BLOCK_Y*BLOCK_Z]; //array for the local flat matrices of all the elememnts in the current block int lfmIdx = threadIdx.z*BLOCK_Y + threadIdx.y; //local flat matrix index of the integrand of threadIdx.y float params[3*NNODE]; //parameters array of integrand //compute local matrix if(gEleIdx < NE) { #pragma unroll for(int i=0; i<NNODE; i++) params[i] = sX[sEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) params[NNODE+i] = sY[sEleIdx+i]; params[2*NNODE+0] = triR[threadIdx.x]; params[2*NNODE+1] = triS[threadIdx.x]; params[2*NNODE+2] = triT[threadIdx.x]; //triT[threadIdx.x]=1.0-triR[threadIdx.x]-triS[threadIdx.x]; atomicAdd( &localFlatMatrix[lfmIdx], triW[threadIdx.x]*integrand(threadIdx.y, params) ); } __syncthreads(); //write to gobal matrix A if(gEleIdx < NE) { if(threadIdx.x == 0) { //global matrix row and column index int gi = sGIdx[sEleIdx + li]; int gj = sGIdx[sEleIdx + lj]; atomicAdd( &A[N*gj + gi], localFlatMatrix[lfmIdx] ); } } } hipError_t assembleWithCuda() { dim3 dim_block; hipError_t cudaStatus; hipEvent_t start, stop; float elapsed = 0; dim_block.x = BLOCK_X; dim_block.y = BLOCK_Y; dim_block.z = BLOCK_Z; printf("block_x:%d, block_y:%d, block_z:%d\n", dim_block.x, dim_block.y, dim_block.z); hipSetDevice(0); RectangleMesh mesh(-3.0, 3.0, -3.0, 3.0, MESH_W, MESH_H); mesh.printMesh(); float *A = (float*)malloc( M*N*sizeof(float) ); float *X = (float*)malloc( NE*NNODE*sizeof(float) ); float *Y = (float*)malloc( 
NE*NNODE*sizeof(float) ); int *gIdx = (int*)malloc( NE*NNODE*sizeof(int) ); for(int i=0; i<M*N; i++) A[i] = 0.0f; for(int i=0; i<mesh.elements.size(); i++) { Element *e = mesh.elements[i]; X[NNODE*i+0] = e->nodes[0]->x; X[NNODE*i+1] = e->nodes[1]->x; X[NNODE*i+2] = e->nodes[2]->x; Y[NNODE*i+0] = e->nodes[0]->y; Y[NNODE*i+1] = e->nodes[1]->y; Y[NNODE*i+2] = e->nodes[2]->y; gIdx[NNODE*i+0] = e->nodes[0]->index; gIdx[NNODE*i+1] = e->nodes[1]->index; gIdx[NNODE*i+2] = e->nodes[2]->index; } float *dA = NULL; hipMalloc((void**)&dA, M*N*sizeof(float)); float *dX = NULL; hipMalloc((void**)&dX, NE*NNODE*sizeof(float)); float *dY = NULL; hipMalloc((void**)&dY, NE*NNODE*sizeof(float)); int *dGIdx = NULL; hipMalloc((void**)&dGIdx, NE*NNODE*sizeof(int)); hipMemcpy(dA, A, M*N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dX, X, NE*NNODE*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dY, Y, NE*NNODE*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dGIdx, gIdx, NE*NNODE*sizeof(int), hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); fea_kernel << <2, dim_block >> >(dA, dX, dY, dGIdx); //bugfix 1 => 2 hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); printf("GPU Time: %f ms\n", elapsed); hipDeviceSynchronize(); cudaStatus = hipMemcpy(A, dA, M*N*sizeof(float), hipMemcpyDeviceToHost); for(int i=0; i<M; i++) { for(int j=0; j<N; j++) { printf("%f ", A[i*N+j]); } printf("\n"); } hipFree(dA); hipFree(dX); hipFree(dY); hipFree(dGIdx); return cudaStatus; } int main() { assembleWithCuda(); hipDeviceReset(); return 0; }
084bc418589a65c33f2f94bdf53a0e706af5ed8a.cu
//use shared memory #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <chrono> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <ctime> #include <vector> using namespace std; #define MESH_W 4 #define MESH_H 4 #define M (MESH_W+1)*(MESH_H+1) //size of matrix A M by N #define N (MESH_W+1)*(MESH_H+1) #define NE 2*MESH_W*MESH_H //number of elements #define BLOCK_X 7 // number of integration points #define BLOCK_Y 9 // number of expressions #define BLOCK_Z ((int)(32*32)/(BLOCK_X*BLOCK_Y)) //number of elements in a block #define NDOF 3 //number of DOFs #define NNODE 3 //number of nodes __constant__ float triW[7] = { 0.06296959f, 0.06619708f, 0.06296959f, 0.06619708f, 0.06296959f, 0.06619708f, 0.11250000f }; __constant__ float triR[7] = { 0.10128651f, 0.47014206f, 0.79742699f, 0.47014206f, 0.10128651f, 0.05971587f, 0.33333333f }; __constant__ float triS[7] = { 0.10128651f, 0.05971587f, 0.10128651f, 0.47014206f, 0.79742699f, 0.47014206f, 0.33333333f }; __constant__ float triT[7] = { 0.79742698f, 0.47014207f, 0.1012865f, 0.05971588f, 0.1012865f, 0.47014207f, 0.33333334f }; class Node { public: double x, y, z; int flag; //boundary flag int index; //global index of node }; class Element { public: vector<Node*> nodes; }; class Mesh { public: vector<Node*> nodes; vector<Element*> elements; void printMesh() { cout << "number of nodes = " << nodes.size() << endl; for(int i=0; i<nodes.size(); i++) { Node *node = nodes[i]; cout << node->index << " " <<node->x << " " << node->y << " " << node->flag << endl; } cout << "number of elements = " <<elements.size() << endl; for(int i=0; i<elements.size(); i++) { Element *e = elements[i]; cout << e->nodes[0]->index << " " << e->nodes[1]->index << " " << e->nodes[2]->index << endl; } } }; class RectangleMesh : public Mesh { public: double x0,x1,y0,y1; int nRow, nCol; RectangleMesh(double x0, double x1, double y0, double y1, int nRow, int nCol) { this->x0 = x0; this->x1 = x1; this->y0 = y0; this->y1 = y1; this->nRow = nRow; this->nCol = nCol; generate(); } void generate() { double stepx = (x1-x0)/nCol; double stepy = (y1-y0)/nRow; //generate nodes for(int i=0; i<=nRow; i++) { double y = y0+i*stepy; for(int j=0; j<=nCol; j++) { double x = x0+j*stepx; Node *node = new Node(); node->x = x; node->y = y; if(i==0 || i==nRow || j==0 || j==nCol) node->flag = 1; //on the bounday else node->flag = 0; node->index = i*(nCol+1) + j; nodes.push_back(node); } } //generate elements for(int i=0; i<nRow; i++) { for(int j=0; j<nCol; j++) { Element *e = new Element(); int n1 = i*(nCol+1) + j; int n2 = n1 + 1; int n3 = (i+1)*(nCol+1) + j; e->nodes.push_back(nodes[n1]); e->nodes.push_back(nodes[n2]); e->nodes.push_back(nodes[n3]); elements.push_back(e); e = new Element(); n1 = i*(nCol+1) + j + 1; n2 = (i+1)*(nCol+1) + j+ 1; n3 = n2 - 1; e->nodes.push_back(nodes[n1]); e->nodes.push_back(nodes[n2]); e->nodes.push_back(nodes[n3]); elements.push_back(e); } } } }; class UnitSquareMesh : public RectangleMesh { public: UnitSquareMesh(int nRow, int nCol) : RectangleMesh(0.0,1.0,0.0,1.0,nRow,nCol) {} }; //This function should be generated from the symbol expressions of the integrand __device__ float integrand(int funIdx, float *params) { float x1 = params[0]; float x2 = params[1]; float x3 = params[2]; float y1 = params[3]; float y2 = params[4]; float y3 = params[5]; float r = params[6]; float s = params[7]; float t = params[8]; //printf("%d %f %f %f %f %f %f %f %f %f\n", funIdx, x1,x2,x3,y1,y2,y3,r,s,t); //x = x1*r + x2*s + x3*t //y = 
y1*r + y2*s + y3*t float xr = x1-x3; float xs = x2-x3; float yr = y1-y3; float ys = y2-y3; // jac = (xr xs) // (yr ys) float jac = xr*ys-xs*yr; float rx = ys/jac; float ry = -xs/jac; float sx = -yr/jac; float sy = xr/jac; float tx = -rx - sx; float ty = -ry - sy; // float[][] lhs = { // {rx*rx + ry*ry, rx*sx + ry*sy, rx*tx + ry*ty}, // {sx*rx + sy*ry, sx*sx + sy*sy, sx*tx + sy*ty}, // {tx*rx + ty*ry, tx*sx + ty*sy, tx*tx + ty*ty}, // }; if(funIdx == 0) return (rx*rx + ry*ry)*jac*0.5; if(funIdx == 1) return (rx*sx + ry*sy)*jac*0.5; if(funIdx == 2) return (rx*tx + ry*ty)*jac*0.5; if(funIdx == 3) return (sx*rx + sy*ry)*jac*0.5; if(funIdx == 4) return (sx*sx + sy*sy)*jac*0.5; if(funIdx == 5) return (sx*tx + sy*ty)*jac*0.5; if(funIdx == 6) return (tx*rx + ty*ry)*jac*0.5; if(funIdx == 7) return (tx*sx + ty*sy)*jac*0.5; if(funIdx == 8) return (tx*tx + ty*ty)*jac*0.5; return 0.0f; } //Version 3: use shared memory __global__ void fea_kernel(float* A, float *X, float *Y, // (x,y) of each element for all the element int *gIdx // node index of each element for all the element ) { __shared__ float sX[BLOCK_Z*NNODE]; //shared memory of copy of X in the current block __shared__ float sY[BLOCK_Z*NNODE]; //shared memory of copy of Y in the current block __shared__ float sGIdx[BLOCK_Z*NNODE]; //shared memory of copy of gIdx in the current block int gEleIdx = BLOCK_Z*blockIdx.x + threadIdx.z; //global element index int sEleIdx = NNODE*threadIdx.z; //global element index in shared memory arrays: sX,sY,sGIdx // copy from global memory to shared memory for X, Y and gIdx if(threadIdx.x==0 && threadIdx.y==0) { #pragma unroll for(int i=0; i<NNODE; i++) sX[sEleIdx+i]=X[NNODE*gEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) sY[sEleIdx+i]=Y[NNODE*gEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) sGIdx[sEleIdx+i]=gIdx[NNODE*gEleIdx+i]; } __syncthreads(); //local matrix row and column index //threadIdx.y = 0,1,2,3,4,5,6,7,8 (BLOCK_Y) int li = threadIdx.y / NDOF; int lj = threadIdx.y % NDOF; __shared__ float localFlatMatrix[BLOCK_Y*BLOCK_Z]; //array for the local flat matrices of all the elememnts in the current block int lfmIdx = threadIdx.z*BLOCK_Y + threadIdx.y; //local flat matrix index of the integrand of threadIdx.y float params[3*NNODE]; //parameters array of integrand //compute local matrix if(gEleIdx < NE) { #pragma unroll for(int i=0; i<NNODE; i++) params[i] = sX[sEleIdx+i]; #pragma unroll for(int i=0; i<NNODE; i++) params[NNODE+i] = sY[sEleIdx+i]; params[2*NNODE+0] = triR[threadIdx.x]; params[2*NNODE+1] = triS[threadIdx.x]; params[2*NNODE+2] = triT[threadIdx.x]; //triT[threadIdx.x]=1.0-triR[threadIdx.x]-triS[threadIdx.x]; atomicAdd( &localFlatMatrix[lfmIdx], triW[threadIdx.x]*integrand(threadIdx.y, params) ); } __syncthreads(); //write to gobal matrix A if(gEleIdx < NE) { if(threadIdx.x == 0) { //global matrix row and column index int gi = sGIdx[sEleIdx + li]; int gj = sGIdx[sEleIdx + lj]; atomicAdd( &A[N*gj + gi], localFlatMatrix[lfmIdx] ); } } } cudaError_t assembleWithCuda() { dim3 dim_block; cudaError_t cudaStatus; cudaEvent_t start, stop; float elapsed = 0; dim_block.x = BLOCK_X; dim_block.y = BLOCK_Y; dim_block.z = BLOCK_Z; printf("block_x:%d, block_y:%d, block_z:%d\n", dim_block.x, dim_block.y, dim_block.z); cudaSetDevice(0); RectangleMesh mesh(-3.0, 3.0, -3.0, 3.0, MESH_W, MESH_H); mesh.printMesh(); float *A = (float*)malloc( M*N*sizeof(float) ); float *X = (float*)malloc( NE*NNODE*sizeof(float) ); float *Y = (float*)malloc( NE*NNODE*sizeof(float) ); int *gIdx = (int*)malloc( 
NE*NNODE*sizeof(int) ); for(int i=0; i<M*N; i++) A[i] = 0.0f; for(int i=0; i<mesh.elements.size(); i++) { Element *e = mesh.elements[i]; X[NNODE*i+0] = e->nodes[0]->x; X[NNODE*i+1] = e->nodes[1]->x; X[NNODE*i+2] = e->nodes[2]->x; Y[NNODE*i+0] = e->nodes[0]->y; Y[NNODE*i+1] = e->nodes[1]->y; Y[NNODE*i+2] = e->nodes[2]->y; gIdx[NNODE*i+0] = e->nodes[0]->index; gIdx[NNODE*i+1] = e->nodes[1]->index; gIdx[NNODE*i+2] = e->nodes[2]->index; } float *dA = NULL; cudaMalloc((void**)&dA, M*N*sizeof(float)); float *dX = NULL; cudaMalloc((void**)&dX, NE*NNODE*sizeof(float)); float *dY = NULL; cudaMalloc((void**)&dY, NE*NNODE*sizeof(float)); int *dGIdx = NULL; cudaMalloc((void**)&dGIdx, NE*NNODE*sizeof(int)); cudaMemcpy(dA, A, M*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dX, X, NE*NNODE*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dY, Y, NE*NNODE*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dGIdx, gIdx, NE*NNODE*sizeof(int), cudaMemcpyHostToDevice); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); fea_kernel << <2, dim_block >> >(dA, dX, dY, dGIdx); //bugfix 1 => 2 cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); printf("GPU Time: %f ms\n", elapsed); cudaDeviceSynchronize(); cudaStatus = cudaMemcpy(A, dA, M*N*sizeof(float), cudaMemcpyDeviceToHost); for(int i=0; i<M; i++) { for(int j=0; j<N; j++) { printf("%f ", A[i*N+j]); } printf("\n"); } cudaFree(dA); cudaFree(dX); cudaFree(dY); cudaFree(dGIdx); return cudaStatus; } int main() { assembleWithCuda(); cudaDeviceReset(); return 0; }
4c9e975e2ae0399988d1585cd6e6a948a11816e4.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <stdexcept>
#include <random>

#include <thrust/functional.h>    // for binary_function
#include <thrust/host_vector.h>   // for binary_function
#include <thrust/device_vector.h> // for binary_function

typedef float (*nvstdfunction)(float x,float y);

int _M;

#define MEMCPY_TO_SYMBOL(target, source, count, offset, direction) \
  do { hipError_t ret = hipMemcpyToSymbol(target, source, count, offset, direction); \
       if(ret!=hipSuccess) throw runtime_error(string(hipGetErrorString(ret))); } while(0)

#define TEST(OP_PTR) \
  do { void *address; \
       hipError_t ret = hipGetSymbolAddress(&address, OP_PTR ## _ptr); \
       if(ret!=hipSuccess) { std::cout<<"on line "<<__LINE__<<std::endl; throw std::runtime_error(std::string(hipGetErrorString(ret))); } \
       test(new BinaryOp(address)); } while (0);

#include "fun.icc"

#define REGISTER(fun) \
  __device__ nvstdfunction fun ## _ptr = fun;

REGISTER(logLLdev)
REGISTER(RPFdev)
REGISTER(MVLLdev20)
REGISTER(MVLLdev50)
REGISTER(MVLLdev70)

struct BinaryOp: public thrust::binary_function<float,float,float>
{
  BinaryOp(void *address)
  {
    hipError_t ret = hipMemcpy(&m_f, address, sizeof(void*),hipMemcpyDeviceToHost);
    if(ret!=hipSuccess) throw std::runtime_error(std::string(hipGetErrorString(ret)));
  }

  __device__ float operator()(float x,float y) const
  {
    return (*(reinterpret_cast<nvstdfunction>(m_f)))(x,y);
  }

  void *m_f;
};

void test ( struct BinaryOp*op)
{
  std::random_device r;
  std::default_random_engine e1(r());
  std::uniform_real_distribution<float> uniform_dist(100, 600);

  thrust::host_vector<float> x_th_host_v;
  thrust::host_vector<float> y_th_host_v;

  for (int i = 0; i < _M; i++) {
    x_th_host_v.push_back(uniform_dist(e1));
    y_th_host_v.push_back(2.0f);
  }

  thrust::device_vector<float> x_th_dev_v(x_th_host_v);
  thrust::device_vector<float> y_th_dev_v(y_th_host_v);

  thrust::transform(x_th_dev_v.begin(),x_th_dev_v.end(),y_th_dev_v.begin(),y_th_dev_v.begin(),*op);

  y_th_host_v = y_th_dev_v;
  // std::cout<<y_th_host_v[0]<<std::endl;
}

void logLL()  { TEST(logLLdev)  }
void RPF()    { TEST(RPFdev)    }
void MVLL20() { TEST(MVLLdev20) }
void MVLL50() { TEST(MVLLdev50) }
void MVLL70() { TEST(MVLLdev70) }
4c9e975e2ae0399988d1585cd6e6a948a11816e4.cu
#include <iostream>
#include <string>
#include <stdexcept>
#include <random>

#include <thrust/functional.h>    // for binary_function
#include <thrust/host_vector.h>   // for binary_function
#include <thrust/device_vector.h> // for binary_function

typedef float (*nvstdfunction)(float x,float y);

int _M;

#define MEMCPY_TO_SYMBOL(target, source, count, offset, direction) \
  do { cudaError_t ret = cudaMemcpyToSymbol(target, source, count, offset, direction); \
       if(ret!=cudaSuccess) throw runtime_error(string(cudaGetErrorString(ret))); } while(0)

#define TEST(OP_PTR) \
  do { void *address; \
       cudaError_t ret = cudaGetSymbolAddress(&address, OP_PTR ## _ptr); \
       if(ret!=cudaSuccess) { std::cout<<"on line "<<__LINE__<<std::endl; throw std::runtime_error(std::string(cudaGetErrorString(ret))); } \
       test(new BinaryOp(address)); } while (0);

#include "fun.icc"

#define REGISTER(fun) \
  __device__ nvstdfunction fun ## _ptr = fun;

REGISTER(logLLdev)
REGISTER(RPFdev)
REGISTER(MVLLdev20)
REGISTER(MVLLdev50)
REGISTER(MVLLdev70)

struct BinaryOp: public thrust::binary_function<float,float,float>
{
  BinaryOp(void *address)
  {
    cudaError_t ret = cudaMemcpy(&m_f, address, sizeof(void*),cudaMemcpyDeviceToHost);
    if(ret!=cudaSuccess) throw std::runtime_error(std::string(cudaGetErrorString(ret)));
  }

  __device__ float operator()(float x,float y) const
  {
    return (*(reinterpret_cast<nvstdfunction>(m_f)))(x,y);
  }

  void *m_f;
};

void test ( struct BinaryOp*op)
{
  std::random_device r;
  std::default_random_engine e1(r());
  std::uniform_real_distribution<float> uniform_dist(100, 600);

  thrust::host_vector<float> x_th_host_v;
  thrust::host_vector<float> y_th_host_v;

  for (int i = 0; i < _M; i++) {
    x_th_host_v.push_back(uniform_dist(e1));
    y_th_host_v.push_back(2.0f);
  }

  thrust::device_vector<float> x_th_dev_v(x_th_host_v);
  thrust::device_vector<float> y_th_dev_v(y_th_host_v);

  thrust::transform(x_th_dev_v.begin(),x_th_dev_v.end(),y_th_dev_v.begin(),y_th_dev_v.begin(),*op);

  y_th_host_v = y_th_dev_v;
  // std::cout<<y_th_host_v[0]<<std::endl;
}

void logLL()  { TEST(logLLdev)  }
void RPF()    { TEST(RPFdev)    }
void MVLL20() { TEST(MVLLdev20) }
void MVLL50() { TEST(MVLLdev50) }
void MVLL70() { TEST(MVLLdev70) }
1d38375e3dbcd0acc5ac5b6fae20c71d77f25926.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_tcsp.cu
 *
 * @brief Simple test driver program for Gunrock template.
 */

#include <gunrock/app/tc/tc_app.cu>
#include <gunrock/app/test_base.cuh>

using namespace gunrock;

/******************************************************************************
 * Main
 ******************************************************************************/

/**
 * @brief Enclosure to the main function
 */
struct main_struct {
  /**
   * @brief the actual main function, after type switching
   * @tparam VertexT Type of vertex identifier
   * @tparam SizeT   Type of graph size, i.e. type of edge identifier
   * @tparam ValueT  Type of edge values
   * @param  parameters Command line parameters
   * @param  v,s,val Place holders for type deduction
   * \return hipError_t error message(s), if any
   */
  template <typename VertexT,  // Use int as the vertex identifier
            typename SizeT,    // Use int as the graph size type
            typename ValueT>   // Use float as the value type
  hipError_t operator()(util::Parameters &parameters, VertexT v, SizeT s,
                        ValueT val) {
    typedef typename app::TestGraph<VertexT, SizeT, ValueT,
                                    graph::HAS_EDGE_VALUES | graph::HAS_CSR>
        GraphT;

    hipError_t retval = hipSuccess;
    util::CpuTimer cpu_timer;
    GraphT graph;

    cpu_timer.Start();
    GUARD_CU(graphio::LoadGraph(parameters, graph));
    cpu_timer.Stop();
    parameters.Set("load-time", cpu_timer.ElapsedMillis());

    bool quick = parameters.Get<bool>("quick");
    bool quiet = parameters.Get<bool>("quiet");
    std::string validation = parameters.Get<std::string>("validation");
    if (quick && (parameters.UseDefault("validation") == false &&
                  validation != "none")) {
      util::PrintMsg("Invalid options --quick and --validation=" + validation +
                     ", no CPU reference result to validate");
      return retval;
    }

    int num_runs = parameters.Get<int>("num-runs");
    SizeT nodes = graph.nodes;
    VertexT *ref_tc_counts = new VertexT[nodes];

    if (!quick) {
      util::PrintMsg("__________________________", !quiet);
      float elapsed =
          app::tc::CPU_Reference(parameters, graph.csr(), ref_tc_counts);
      util::PrintMsg("__________________________\nRun CPU Reference Avg. in " +
                         std::to_string(num_runs) + " iterations elapsed: " +
                         std::to_string(elapsed) + " ms",
                     !quiet);
    }

    std::vector<std::string> switches{"advance-mode"};
    GUARD_CU(app::Switch_Parameters(
        parameters, graph, switches,
        [ref_tc_counts](util::Parameters &parameters, GraphT &graph) {
          return app::tc::RunTests(parameters, graph, ref_tc_counts);
        }));

    if (ref_tc_counts != NULL) {
      delete[] ref_tc_counts;
      ref_tc_counts = NULL;
    }
    return retval;
  }
};

int main(int argc, char **argv) {
  hipError_t retval = hipSuccess;
  util::Parameters parameters("test Triangle Counting");
  GUARD_CU(graphio::UseParameters(parameters));
  GUARD_CU(app::tc::UseParameters(parameters));
  GUARD_CU(app::UseParameters_test(parameters));
  GUARD_CU(parameters.Parse_CommandLine(argc, argv));
  if (parameters.Get<bool>("help")) {
    parameters.Print_Help();
    return hipSuccess;
  }
  GUARD_CU(parameters.Check_Required());

  if (!parameters.Get<bool>("sort-csr")) {
    util::PrintMsg("TC requires sorted CSR, using --sort-csr");
    // return gunrock::util::GRError(
    //     hipErrorInvalidValue, "TC requires a sorted CSR", __FILE__, __LINE__);
    parameters.Set("sort-csr", true);
  }

  return app::Switch_Types<app::VERTEXT_U32B |  // app::VERTEXT_U64B |
                           app::SIZET_U32B |    // app::SIZET_U64B |
                           app::VALUET_F64B | app::UNDIRECTED | app::DIRECTED>(
      parameters, main_struct());
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
1d38375e3dbcd0acc5ac5b6fae20c71d77f25926.cu
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_tcsp.cu
 *
 * @brief Simple test driver program for Gunrock template.
 */

#include <gunrock/app/tc/tc_app.cu>
#include <gunrock/app/test_base.cuh>

using namespace gunrock;

/******************************************************************************
 * Main
 ******************************************************************************/

/**
 * @brief Enclosure to the main function
 */
struct main_struct {
  /**
   * @brief the actual main function, after type switching
   * @tparam VertexT Type of vertex identifier
   * @tparam SizeT   Type of graph size, i.e. type of edge identifier
   * @tparam ValueT  Type of edge values
   * @param  parameters Command line parameters
   * @param  v,s,val Place holders for type deduction
   * \return cudaError_t error message(s), if any
   */
  template <typename VertexT,  // Use int as the vertex identifier
            typename SizeT,    // Use int as the graph size type
            typename ValueT>   // Use float as the value type
  cudaError_t operator()(util::Parameters &parameters, VertexT v, SizeT s,
                         ValueT val) {
    typedef typename app::TestGraph<VertexT, SizeT, ValueT,
                                    graph::HAS_EDGE_VALUES | graph::HAS_CSR>
        GraphT;

    cudaError_t retval = cudaSuccess;
    util::CpuTimer cpu_timer;
    GraphT graph;

    cpu_timer.Start();
    GUARD_CU(graphio::LoadGraph(parameters, graph));
    cpu_timer.Stop();
    parameters.Set("load-time", cpu_timer.ElapsedMillis());

    bool quick = parameters.Get<bool>("quick");
    bool quiet = parameters.Get<bool>("quiet");
    std::string validation = parameters.Get<std::string>("validation");
    if (quick && (parameters.UseDefault("validation") == false &&
                  validation != "none")) {
      util::PrintMsg("Invalid options --quick and --validation=" + validation +
                     ", no CPU reference result to validate");
      return retval;
    }

    int num_runs = parameters.Get<int>("num-runs");
    SizeT nodes = graph.nodes;
    VertexT *ref_tc_counts = new VertexT[nodes];

    if (!quick) {
      util::PrintMsg("__________________________", !quiet);
      float elapsed =
          app::tc::CPU_Reference(parameters, graph.csr(), ref_tc_counts);
      util::PrintMsg("__________________________\nRun CPU Reference Avg. in " +
                         std::to_string(num_runs) + " iterations elapsed: " +
                         std::to_string(elapsed) + " ms",
                     !quiet);
    }

    std::vector<std::string> switches{"advance-mode"};
    GUARD_CU(app::Switch_Parameters(
        parameters, graph, switches,
        [ref_tc_counts](util::Parameters &parameters, GraphT &graph) {
          return app::tc::RunTests(parameters, graph, ref_tc_counts);
        }));

    if (ref_tc_counts != NULL) {
      delete[] ref_tc_counts;
      ref_tc_counts = NULL;
    }
    return retval;
  }
};

int main(int argc, char **argv) {
  cudaError_t retval = cudaSuccess;
  util::Parameters parameters("test Triangle Counting");
  GUARD_CU(graphio::UseParameters(parameters));
  GUARD_CU(app::tc::UseParameters(parameters));
  GUARD_CU(app::UseParameters_test(parameters));
  GUARD_CU(parameters.Parse_CommandLine(argc, argv));
  if (parameters.Get<bool>("help")) {
    parameters.Print_Help();
    return cudaSuccess;
  }
  GUARD_CU(parameters.Check_Required());

  if (!parameters.Get<bool>("sort-csr")) {
    util::PrintMsg("TC requires sorted CSR, using --sort-csr");
    // return gunrock::util::GRError(
    //     cudaErrorInvalidValue, "TC requires a sorted CSR", __FILE__, __LINE__);
    parameters.Set("sort-csr", true);
  }

  return app::Switch_Types<app::VERTEXT_U32B |  // app::VERTEXT_U64B |
                           app::SIZET_U32B |    // app::SIZET_U64B |
                           app::VALUET_F64B | app::UNDIRECTED | app::DIRECTED>(
      parameters, main_struct());
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
f4c571e74751a60b5baee8c67a23110cf86e21df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dspk.h" using namespace std; namespace kso { namespace img { namespace dspk { void denoise(buf * data_buf, float tmin, float tmax, uint Niter){ buf * db = data_buf; uint ndim = db->ndim; uint nmet = db->nmet; uint ksz1 = db->ksz; // dim3 ksz(25, 25, 25); dim3 ksz(ksz1,ksz1,ksz1); float * dt = db->dt; // float * q1 = db->q1; float * q2 = db->q2; // float * q3 = db->q3; float * ht = db->ht; float * cs = db->cs; float * t0 = db->t0; float * t1 = db->t1; uint * newBad = db->newBad; float * dt_d = db->dt_d; float * gm_d = db->gm_d; float * q2_d = db->q2_d; float * gdev_d = db->gdev_d; float * nsd_d = db->nsd_d; float * tmp_d = db->tmp_d; float * norm_d = db->norm_d; float * ht_d = db->ht_d; float * cs_d = db->cs_d; float * t0_d = db->t0_d; float * t1_d = db->t1_d; float * T0_d = db->T0_d; float * T1_d = db->T1_d; uint * newBad_d = db->newBad_d; dim3 hsz = db->hsz; uint hsz3 = db->hsz3; uint num_strides = db->S->num_strides; uint * A = db->S->A; uint * a = db->S->a; uint * L = db->S->L; uint * l = db->S->l; uint * B = db->S->B; uint * b = db->S->b; uint * M = db->S->M; uint * m = db->S->m; uint * b_d = db->S->b_d; dim3 dsz = db->sz; uint dsz3 = db->sz3; uint sz3; dim3 sz; dim3 threads, blocks; uint totBad = 0; cout << num_strides << endl; float time; hipEvent_t start, stop; CHECK( hipEventCreate(&start) ); CHECK( hipEventCreate(&stop) ); CHECK( hipEventRecord(start, 0) ); // loop over chunks for(uint s = 0; s < num_strides; s++){ cout << A[s] << endl; cout << L[s] << endl; cout << a[s] << endl; cout << l[s] << endl; cout << b_d[s] << endl; cout << "-----------------" << endl; // calculate size for this iteration sz.x = db->csz.x; sz.y = db->csz.y; sz.z = db->S->L[s]; sz3 = sz.x * sz.y * sz.z; // calculate number of threads in each dimension threads.x = sz.x; threads.y = 1; threads.z = 1; // calculate number of blocks in each dimension blocks.x = 1; blocks.y = sz.y; blocks.z = sz.z; // copy memory to device CHECK(hipMemcpy(dt_d, dt + B[s], M[s] * sizeof(float), hipMemcpyHostToDevice)); // CHECK(hipMemcpy(gm_d, gm + B[s], M[s] * sizeof(float), hipMemcpyHostToDevice)); // initialize good pixel map hipLaunchKernelGGL(( init_gm), dim3(blocks), dim3(threads), 0, 0, gm_d, dt_d, sz); hipLaunchKernelGGL(( init_hist), dim3(hsz.y), dim3(hsz.x), 0, 0, ht_d, hsz, nmet); hipLaunchKernelGGL(( init_thresh), dim3(1), dim3(hsz.x), 0, 0, t0_d, t1_d, hsz, nmet); cout << "Median Filter" << endl; *newBad = 0; // reset the number of bad pixels found for this iteration CHECK(hipMemcpy(newBad_d, newBad, sizeof(uint), hipMemcpyHostToDevice)); // loop over each axis for(uint ax = 0; ax < nmet; ax++){ // move pointer to correct place in memory float * q2x_d = q2_d + ax * sz3; float * htx_d = ht_d + ax * hsz3; float * csx_d = cs_d + ax * hsz3; float * t0x_d = t0_d + ax * hsz.x; float * t1x_d = t1_d + ax * hsz.x; float * T0x_d = T0_d + ax * hsz.x; float * T1x_d = T1_d + ax * hsz.x; float * q2x = q2 + ax * dsz3; float * htx = ht + ax * hsz3; float * csx = cs + ax * hsz3; float * t0x = t0 + ax * hsz.x; float * t1x = t1 + ax * hsz.x; if(ax < ndim){ // independent median for each axis hipLaunchKernelGGL(( calc_sep_quartile), dim3(blocks), dim3(threads), 0, 0, q2x_d, dt_d, gm_d, sz, ksz, uv[ax], 2); } else { // separable median calc_quartile(q2x_d, dt_d, gm_d, tmp_d, sz, ksz, 2); // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // 
calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(q2x_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, q2x_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(q2x_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); } hipLaunchKernelGGL(( calc_hist), dim3(blocks), dim3(threads), 0, 0, htx_d, dt_d, q2x_d, gm_d, sz, hsz); hipLaunchKernelGGL(( calc_cumsum), dim3(1),dim3(hsz.x), 0, 0, csx_d, htx_d, hsz); hipLaunchKernelGGL(( calc_thresh), dim3(1),dim3(hsz.x), 0, 0, t0x_d, t1x_d, htx_d, csx_d, hsz, tmin, tmax); // smooth_thresh<<<1,hsz.x>>>(t0x_d, T0x_d, hsz, 3); // smooth_thresh<<<1,hsz.x>>>(t1x_d, T1x_d, hsz, 3); cout << ax << endl; CHECK(hipMemcpy(q2x + b[s], q2x_d + b_d[s], m[s] * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(htx, htx_d, db->hsz3 * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(csx, csx_d, db->hsz3 * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(t0x, t0x_d, hsz.x * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(t1x, t1x_d, hsz.x * sizeof(float), hipMemcpyDeviceToHost)); } hipLaunchKernelGGL(( calc_gm), dim3(blocks),dim3(threads), 0, 0, gm_d, newBad_d, dt_d, q2_d, t0_d, t1_d, sz, hsz, nmet); // ///////////////////////////////////////////////////////////////////// // cout << "Mean Filter" << endl; // // // Number of identification iterations // for(uint iter = 0; iter < Niter; iter++){ // // *newBad = 0; // reset the number of bad pixels found for this iteration // CHECK(hipMemcpy(newBad_d, newBad, sizeof(uint), hipMemcpyHostToDevice)); // // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(gdev_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, gdev_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(gdev_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // tmax = 0.99; // tmin = 0.01; // // // hipLaunchKernelGGL(( init_hist), dim3(hsz.y), dim3(hsz.x), 0, 0, ht_d, hsz, nmet); // hipLaunchKernelGGL(( init_thresh), dim3(1), dim3(hsz.x), 0, 0, t0_d, t1_d, hsz, nmet); // hipLaunchKernelGGL(( calc_hist), dim3(blocks), dim3(threads), 0, 0, ht_d, dt_d, gdev_d, gm_d, sz, hsz); // hipLaunchKernelGGL(( calc_cumsum), dim3(1),dim3(hsz.x), 0, 0, cs_d, ht_d, hsz); // hipLaunchKernelGGL(( calc_thresh), dim3(1),dim3(hsz.x), 0, 0, t0_d, t1_d, ht_d, cs_d, hsz, tmin, tmax); //// smooth_thresh<<<1,hsz.x>>>(t0_d, T0_d, hsz, 3); //// smooth_thresh<<<1,hsz.x>>>(t1_d, T1_d, hsz, 3); // // hipLaunchKernelGGL(( init_gm), dim3(blocks), dim3(threads), 0, 0, gm_d, dt_d, sz); // // // calculate mean on unmasked data // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(gdev_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, gdev_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(gdev_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // hipLaunchKernelGGL(( init_hist), dim3(hsz.y), dim3(hsz.x), 0, 0, ht_d, hsz, nmet); // hipLaunchKernelGGL(( calc_hist), dim3(blocks), dim3(threads), 0, 0, ht_d, dt_d, gdev_d, gm_d, sz, hsz); // // hipLaunchKernelGGL(( calc_gm), dim3(blocks),dim3(threads), 0, 0, gm_d, newBad_d, dt_d, gdev_d, t0_d, t1_d, sz, hsz); // // // kso::img::dspk::calc_nsd_0<<<blocks, threads>>>(nsd_d, gdev_d, sz, ksz1); // // 
kso::img::dspk::calc_nsd_1<<<blocks, threads>>>(tmp_d, nsd_d, sz, ksz1); // // kso::img::dspk::calc_nsd_2<<<blocks, threads>>>(nsd_d, tmp_d, norm_d, sz, ksz1); // // // // kso::img::dspk::calc_gm<<<blocks, threads>>>(gm_d, gdev_d, nsd_d, dt_d, std_dev, newBad_d, sz, ksz1); // // CHECK(hipMemcpy(q2 + b[s], gdev_d + b_d[s], m[s] * sizeof(float), hipMemcpyDeviceToHost)); // CHECK(hipMemcpy(ht, ht_d, db->hsz3 * sizeof(float), hipMemcpyDeviceToHost)); // CHECK(hipMemcpy(cs, cs_d, db->hsz3 * sizeof(float), hipMemcpyDeviceToHost)); // CHECK(hipMemcpy(t0, t0_d, hsz.x * sizeof(float), hipMemcpyDeviceToHost)); // CHECK(hipMemcpy(t1, t1_d, hsz.x * sizeof(float), hipMemcpyDeviceToHost)); // // CHECK(hipMemcpy(newBad, newBad_d, sizeof(uint), hipMemcpyDeviceToHost)); // cout << "Iteration " << iter << ": found " << *newBad << " bad pixels\n"; // totBad = totBad + *newBad; // // if(*newBad == 0){ // stop if we're not finding any pixels // break; // } // // } // ksz = 9; // // kso::img::dspk::calc_lmn_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // kso::img::dspk::calc_lmn_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // kso::img::dspk::calc_lmn_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // float * gdt_d = gdev_d; // reuse neighborhood mean memory // kso::img::dspk::calc_gdt<<<blocks, threads>>>(gdt_d, dt_d, gm_d, sz); // // float * tp; // temporary pointer // // Niter = 10; // // // // for(uint iter = 0; iter < Niter; iter++){ // // // switch locations of temp and data buffer so this for loop works right // tp = gdt_d; // gdt_d = tmp_d; // tmp_d = tp; // // kso::img::dspk::calc_gdt_0<<<blocks, threads>>>(gdt_d, tmp_d, gm_d, sz, ksz1); // kso::img::dspk::calc_gdt_1<<<blocks, threads>>>(tmp_d, gdt_d, sz, ksz1); // kso::img::dspk::calc_gdt_2<<<blocks, threads>>>(gdt_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // cout << "Iteration " << iter << endl; // // } CHECK(hipDeviceSynchronize()); // copy back from devicecudaMemcpyDeviceToHost; CHECK(hipMemcpy(dt + b[s], dt_d + b_d[s], m[s] * sizeof(float), hipMemcpyDeviceToHost)); CHECK( hipEventRecord(stop, 0) ); CHECK( hipEventSynchronize(stop) ); CHECK( hipEventElapsedTime(&time, start, stop) ); printf("CUDA-measured time: %3.1f ms \n", time); cout << "Total bad pixels: " << totBad << endl; cout << "Percentage bad pixels: " << ((float) totBad) / ((float) sz3) << endl; } hipFree(db->buf_d); return; } void denoise_ndarr(const np::ndarray & data, const np::ndarray & goodmap, const np::ndarray & hist, float tmin, float tmax, uint hsx, uint hsy, uint k_sz, uint Niter){ // shape of input data dim3 sz; sz.z = data.get_shape()[0]; sz.y = data.get_shape()[1]; sz.x = data.get_shape()[2]; dim3 st; st.z = data.get_strides()[0] / sizeof(float); st.y = data.get_strides()[1] / sizeof(float); st.x = data.get_strides()[2] / sizeof(float); dim3 hsz(hsx, hsy, 0); // extract float data from numpy array float * dt = (float *) data.get_data(); float * gm = (float *) goodmap.get_data(); uint n_threads = 1; buf * db = new buf(dt, gm, sz, k_sz, hsz, n_threads); // denoise(db, tmin, tmax, Niter); } //np::ndarray denoise_fits_file(py::str path, float med_dev, float std_dev, uint k_sz, uint Niter){ // // // string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; //// string cpath = "/kso/iris_l2_20141129_000738_3860009154_raster_t000_r00000.fits"; // // uint n_threads = 1; // uint max_sz = pow(2,30); // 1 GB // // buf * db = new buf(cpath, max_sz, k_sz, n_threads); // // denoise(db, med_dev, std_dev, Niter); // // py::object own = 
py::object(); // py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); // py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); // np::dtype dtype = np::dtype::get_builtin<float>(); // // return np::from_data(db->dt, dtype, shape, stride, own); // //} np::ndarray denoise_fits_file_quartiles(const np::ndarray & q2, const np::ndarray & hist, const np::ndarray & cumsum, const np::ndarray & t0, const np::ndarray & t1, uint hsx, uint hsy, uint k_sz){ uint Niter = 1; string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; // string cpath = "/kso/iris_l2_20140917_015809_3862257453_raster_t000_r00000.fits"; dim3 hsz(hsx, hsy, 0); uint n_threads = 1; uint max_sz = pow(2,30); // 1 GB buf * db = new buf(cpath, max_sz, k_sz, hsz, n_threads); db->q2 = (float *)q2.get_data(); db->ht = (float *)hist.get_data(); db->cs = (float *)cumsum.get_data(); db->t0 = (float *)t0.get_data(); db->t1 = (float *)t1.get_data(); float tmax = 0.98; float tmin = 0.01; denoise(db, tmin, tmax, Niter); py::object own = py::object(); py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); np::dtype dtype = np::dtype::get_builtin<float>(); return np::from_data(db->dt, dtype, shape, stride, own); } np::ndarray read_fits_file(){ uint Niter = 1; string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; // string cpath = "/kso/iris_l2_20140917_015809_3862257453_raster_t000_r00000.fits"; uint n_threads = 1; uint max_sz = pow(2,30); // 1 GB buf * db = new buf(cpath, max_sz, 1, 1, n_threads); py::object own = py::object(); py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); np::dtype dtype = np::dtype::get_builtin<float>(); return np::from_data(db->dt, dtype, shape, stride, own); } } } }
f4c571e74751a60b5baee8c67a23110cf86e21df.cu
#include "dspk.h" using namespace std; namespace kso { namespace img { namespace dspk { void denoise(buf * data_buf, float tmin, float tmax, uint Niter){ buf * db = data_buf; uint ndim = db->ndim; uint nmet = db->nmet; uint ksz1 = db->ksz; // dim3 ksz(25, 25, 25); dim3 ksz(ksz1,ksz1,ksz1); float * dt = db->dt; // float * q1 = db->q1; float * q2 = db->q2; // float * q3 = db->q3; float * ht = db->ht; float * cs = db->cs; float * t0 = db->t0; float * t1 = db->t1; uint * newBad = db->newBad; float * dt_d = db->dt_d; float * gm_d = db->gm_d; float * q2_d = db->q2_d; float * gdev_d = db->gdev_d; float * nsd_d = db->nsd_d; float * tmp_d = db->tmp_d; float * norm_d = db->norm_d; float * ht_d = db->ht_d; float * cs_d = db->cs_d; float * t0_d = db->t0_d; float * t1_d = db->t1_d; float * T0_d = db->T0_d; float * T1_d = db->T1_d; uint * newBad_d = db->newBad_d; dim3 hsz = db->hsz; uint hsz3 = db->hsz3; uint num_strides = db->S->num_strides; uint * A = db->S->A; uint * a = db->S->a; uint * L = db->S->L; uint * l = db->S->l; uint * B = db->S->B; uint * b = db->S->b; uint * M = db->S->M; uint * m = db->S->m; uint * b_d = db->S->b_d; dim3 dsz = db->sz; uint dsz3 = db->sz3; uint sz3; dim3 sz; dim3 threads, blocks; uint totBad = 0; cout << num_strides << endl; float time; cudaEvent_t start, stop; CHECK( cudaEventCreate(&start) ); CHECK( cudaEventCreate(&stop) ); CHECK( cudaEventRecord(start, 0) ); // loop over chunks for(uint s = 0; s < num_strides; s++){ cout << A[s] << endl; cout << L[s] << endl; cout << a[s] << endl; cout << l[s] << endl; cout << b_d[s] << endl; cout << "-----------------" << endl; // calculate size for this iteration sz.x = db->csz.x; sz.y = db->csz.y; sz.z = db->S->L[s]; sz3 = sz.x * sz.y * sz.z; // calculate number of threads in each dimension threads.x = sz.x; threads.y = 1; threads.z = 1; // calculate number of blocks in each dimension blocks.x = 1; blocks.y = sz.y; blocks.z = sz.z; // copy memory to device CHECK(cudaMemcpy(dt_d, dt + B[s], M[s] * sizeof(float), cudaMemcpyHostToDevice)); // CHECK(cudaMemcpy(gm_d, gm + B[s], M[s] * sizeof(float), cudaMemcpyHostToDevice)); // initialize good pixel map init_gm<<<blocks, threads>>>(gm_d, dt_d, sz); init_hist<<<hsz.y, hsz.x>>>(ht_d, hsz, nmet); init_thresh<<<1, hsz.x>>>(t0_d, t1_d, hsz, nmet); cout << "Median Filter" << endl; *newBad = 0; // reset the number of bad pixels found for this iteration CHECK(cudaMemcpy(newBad_d, newBad, sizeof(uint), cudaMemcpyHostToDevice)); // loop over each axis for(uint ax = 0; ax < nmet; ax++){ // move pointer to correct place in memory float * q2x_d = q2_d + ax * sz3; float * htx_d = ht_d + ax * hsz3; float * csx_d = cs_d + ax * hsz3; float * t0x_d = t0_d + ax * hsz.x; float * t1x_d = t1_d + ax * hsz.x; float * T0x_d = T0_d + ax * hsz.x; float * T1x_d = T1_d + ax * hsz.x; float * q2x = q2 + ax * dsz3; float * htx = ht + ax * hsz3; float * csx = cs + ax * hsz3; float * t0x = t0 + ax * hsz.x; float * t1x = t1 + ax * hsz.x; if(ax < ndim){ // independent median for each axis calc_sep_quartile<<<blocks, threads>>>(q2x_d, dt_d, gm_d, sz, ksz, uv[ax], 2); } else { // separable median calc_quartile(q2x_d, dt_d, gm_d, tmp_d, sz, ksz, 2); // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(q2x_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, q2x_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(q2x_d, tmp_d, dt_d, 
gm_d, norm_d, sz, ksz1); } calc_hist<<<blocks, threads>>>(htx_d, dt_d, q2x_d, gm_d, sz, hsz); calc_cumsum<<<1,hsz.x>>>(csx_d, htx_d, hsz); calc_thresh<<<1,hsz.x>>>(t0x_d, t1x_d, htx_d, csx_d, hsz, tmin, tmax); // smooth_thresh<<<1,hsz.x>>>(t0x_d, T0x_d, hsz, 3); // smooth_thresh<<<1,hsz.x>>>(t1x_d, T1x_d, hsz, 3); cout << ax << endl; CHECK(cudaMemcpy(q2x + b[s], q2x_d + b_d[s], m[s] * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(htx, htx_d, db->hsz3 * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(csx, csx_d, db->hsz3 * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(t0x, t0x_d, hsz.x * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(t1x, t1x_d, hsz.x * sizeof(float), cudaMemcpyDeviceToHost)); } calc_gm<<<blocks,threads>>>(gm_d, newBad_d, dt_d, q2_d, t0_d, t1_d, sz, hsz, nmet); // ///////////////////////////////////////////////////////////////////// // cout << "Mean Filter" << endl; // // // Number of identification iterations // for(uint iter = 0; iter < Niter; iter++){ // // *newBad = 0; // reset the number of bad pixels found for this iteration // CHECK(cudaMemcpy(newBad_d, newBad, sizeof(uint), cudaMemcpyHostToDevice)); // // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(gdev_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, gdev_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(gdev_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // tmax = 0.99; // tmin = 0.01; // // // init_hist<<<hsz.y, hsz.x>>>(ht_d, hsz, nmet); // init_thresh<<<1, hsz.x>>>(t0_d, t1_d, hsz, nmet); // calc_hist<<<blocks, threads>>>(ht_d, dt_d, gdev_d, gm_d, sz, hsz); // calc_cumsum<<<1,hsz.x>>>(cs_d, ht_d, hsz); // calc_thresh<<<1,hsz.x>>>(t0_d, t1_d, ht_d, cs_d, hsz, tmin, tmax); //// smooth_thresh<<<1,hsz.x>>>(t0_d, T0_d, hsz, 3); //// smooth_thresh<<<1,hsz.x>>>(t1_d, T1_d, hsz, 3); // // init_gm<<<blocks, threads>>>(gm_d, dt_d, sz); // // // calculate mean on unmasked data // calc_norm_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // calc_norm_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // calc_norm_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // calc_gdev_0<<<blocks, threads>>>(gdev_d, dt_d, gm_d, sz, ksz1); // calc_gdev_1<<<blocks, threads>>>(tmp_d, gdev_d, sz, ksz1); // calc_gdev_2<<<blocks, threads>>>(gdev_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // init_hist<<<hsz.y, hsz.x>>>(ht_d, hsz, nmet); // calc_hist<<<blocks, threads>>>(ht_d, dt_d, gdev_d, gm_d, sz, hsz); // // calc_gm<<<blocks,threads>>>(gm_d, newBad_d, dt_d, gdev_d, t0_d, t1_d, sz, hsz); // // // kso::img::dspk::calc_nsd_0<<<blocks, threads>>>(nsd_d, gdev_d, sz, ksz1); // // kso::img::dspk::calc_nsd_1<<<blocks, threads>>>(tmp_d, nsd_d, sz, ksz1); // // kso::img::dspk::calc_nsd_2<<<blocks, threads>>>(nsd_d, tmp_d, norm_d, sz, ksz1); // // // // kso::img::dspk::calc_gm<<<blocks, threads>>>(gm_d, gdev_d, nsd_d, dt_d, std_dev, newBad_d, sz, ksz1); // // CHECK(cudaMemcpy(q2 + b[s], gdev_d + b_d[s], m[s] * sizeof(float), cudaMemcpyDeviceToHost)); // CHECK(cudaMemcpy(ht, ht_d, db->hsz3 * sizeof(float), cudaMemcpyDeviceToHost)); // CHECK(cudaMemcpy(cs, cs_d, db->hsz3 * sizeof(float), cudaMemcpyDeviceToHost)); // CHECK(cudaMemcpy(t0, t0_d, hsz.x * sizeof(float), cudaMemcpyDeviceToHost)); // CHECK(cudaMemcpy(t1, t1_d, hsz.x * sizeof(float), cudaMemcpyDeviceToHost)); // // 
CHECK(cudaMemcpy(newBad, newBad_d, sizeof(uint), cudaMemcpyDeviceToHost)); // cout << "Iteration " << iter << ": found " << *newBad << " bad pixels\n"; // totBad = totBad + *newBad; // // if(*newBad == 0){ // stop if we're not finding any pixels // break; // } // // } // ksz = 9; // // kso::img::dspk::calc_lmn_0<<<blocks, threads>>>(norm_d, gm_d, newBad_d, sz, ksz1); // kso::img::dspk::calc_lmn_1<<<blocks, threads>>>(tmp_d, norm_d, sz, ksz1); // kso::img::dspk::calc_lmn_2<<<blocks, threads>>>(norm_d, tmp_d, sz, ksz1); // // float * gdt_d = gdev_d; // reuse neighborhood mean memory // kso::img::dspk::calc_gdt<<<blocks, threads>>>(gdt_d, dt_d, gm_d, sz); // // float * tp; // temporary pointer // // Niter = 10; // // // // for(uint iter = 0; iter < Niter; iter++){ // // // switch locations of temp and data buffer so this for loop works right // tp = gdt_d; // gdt_d = tmp_d; // tmp_d = tp; // // kso::img::dspk::calc_gdt_0<<<blocks, threads>>>(gdt_d, tmp_d, gm_d, sz, ksz1); // kso::img::dspk::calc_gdt_1<<<blocks, threads>>>(tmp_d, gdt_d, sz, ksz1); // kso::img::dspk::calc_gdt_2<<<blocks, threads>>>(gdt_d, tmp_d, dt_d, gm_d, norm_d, sz, ksz1); // // cout << "Iteration " << iter << endl; // // } CHECK(cudaDeviceSynchronize()); // copy back from devicecudaMemcpyDeviceToHost; CHECK(cudaMemcpy(dt + b[s], dt_d + b_d[s], m[s] * sizeof(float), cudaMemcpyDeviceToHost)); CHECK( cudaEventRecord(stop, 0) ); CHECK( cudaEventSynchronize(stop) ); CHECK( cudaEventElapsedTime(&time, start, stop) ); printf("CUDA-measured time: %3.1f ms \n", time); cout << "Total bad pixels: " << totBad << endl; cout << "Percentage bad pixels: " << ((float) totBad) / ((float) sz3) << endl; } cudaFree(db->buf_d); return; } void denoise_ndarr(const np::ndarray & data, const np::ndarray & goodmap, const np::ndarray & hist, float tmin, float tmax, uint hsx, uint hsy, uint k_sz, uint Niter){ // shape of input data dim3 sz; sz.z = data.get_shape()[0]; sz.y = data.get_shape()[1]; sz.x = data.get_shape()[2]; dim3 st; st.z = data.get_strides()[0] / sizeof(float); st.y = data.get_strides()[1] / sizeof(float); st.x = data.get_strides()[2] / sizeof(float); dim3 hsz(hsx, hsy, 0); // extract float data from numpy array float * dt = (float *) data.get_data(); float * gm = (float *) goodmap.get_data(); uint n_threads = 1; buf * db = new buf(dt, gm, sz, k_sz, hsz, n_threads); // denoise(db, tmin, tmax, Niter); } //np::ndarray denoise_fits_file(py::str path, float med_dev, float std_dev, uint k_sz, uint Niter){ // // // string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; //// string cpath = "/kso/iris_l2_20141129_000738_3860009154_raster_t000_r00000.fits"; // // uint n_threads = 1; // uint max_sz = pow(2,30); // 1 GB // // buf * db = new buf(cpath, max_sz, k_sz, n_threads); // // denoise(db, med_dev, std_dev, Niter); // // py::object own = py::object(); // py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); // py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); // np::dtype dtype = np::dtype::get_builtin<float>(); // // return np::from_data(db->dt, dtype, shape, stride, own); // //} np::ndarray denoise_fits_file_quartiles(const np::ndarray & q2, const np::ndarray & hist, const np::ndarray & cumsum, const np::ndarray & t0, const np::ndarray & t1, uint hsx, uint hsy, uint k_sz){ uint Niter = 1; string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; // string cpath = "/kso/iris_l2_20140917_015809_3862257453_raster_t000_r00000.fits"; dim3 hsz(hsx, hsy, 0); uint n_threads 
= 1; uint max_sz = pow(2,30); // 1 GB buf * db = new buf(cpath, max_sz, k_sz, hsz, n_threads); db->q2 = (float *)q2.get_data(); db->ht = (float *)hist.get_data(); db->cs = (float *)cumsum.get_data(); db->t0 = (float *)t0.get_data(); db->t1 = (float *)t1.get_data(); float tmax = 0.98; float tmin = 0.01; denoise(db, tmin, tmax, Niter); py::object own = py::object(); py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); np::dtype dtype = np::dtype::get_builtin<float>(); return np::from_data(db->dt, dtype, shape, stride, own); } np::ndarray read_fits_file(){ uint Niter = 1; string cpath = "/kso/iris_l2_20150615_072426_3610091469_raster_t000_r00000.fits"; // string cpath = "/kso/iris_l2_20140917_015809_3862257453_raster_t000_r00000.fits"; uint n_threads = 1; uint max_sz = pow(2,30); // 1 GB buf * db = new buf(cpath, max_sz, 1, 1, n_threads); py::object own = py::object(); py::tuple shape = py::make_tuple(db->sz.z, db->sz.y, db->sz.x); py::tuple stride = py::make_tuple(db->sb.z, db->sb.y, db->sb.x); np::dtype dtype = np::dtype::get_builtin<float>(); return np::from_data(db->dt, dtype, shape, stride, own); } } } }
ba921ddf30f896356395ddff99681b97ac95f3d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ising_cuda.h" #define N 700 #define K 200 #define SPINS_PER_THREAD_DIM 3 #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } __global__ void checkForNoChanges(int *G, int *H, int *checkForNoChanges, int n){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if( (i < n) && (j < n)){ if( G[i*n + j] != H[i*n + j] ){ checkForNoChanges[0] = 1; } } } __global__ void ising(int *G, int *H, double *w, int n){ int i_start = (threadIdx.x + blockIdx.x * blockDim.x)*SPINS_PER_THREAD_DIM; int j_start = (threadIdx.y + blockIdx.y * blockDim.y)*SPINS_PER_THREAD_DIM; if( (i_start < n) && (j_start < n)){ int i_end = i_start + SPINS_PER_THREAD_DIM; int j_end = j_start + SPINS_PER_THREAD_DIM; if(i_end > n) i_end = n; if(j_end > n) j_end = n; double influence = 0.0; for(int k = i_start; k < i_end; k++){ for(int l = j_start; l < j_end; l++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((k - 2 + x + n)%n)*n + ((l - 2 + y + n)%n)]; } } H[k*n + l] = G[k*n + l]; if(influence > 0.000000001){ H[k*n + l] = 1; } else if(influence < -0.000000001) { H[k*n + l] = -1; } influence = 0.0; } } } } int main(int argc, char** argv){ // Declare all variables int n = 0; int k = 0; if (argc != 3) { n = N; k = K; } else { n = atoi(argv[1]); k = atoi(argv[2]); printf("Input n=%d k=%d", n, k); } // Check if SPINS_PER_THREAD_DIM is less than 3 if( SPINS_PER_THREAD_DIM > 3){ printf("\nERROR: SPINS_PER_THREAD_DIM must be less than 3. Aborting...\n"); return -1; } int *G, *G_final, *G_dev, *H_dev; double *w_dev; double w[25] = {0.004 , 0.016 , 0.026 , 0.016 , 0.004 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.026 , 0.117 , 0 , 0.117 , 0.026 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.004 , 0.016 , 0.026 , 0.016 , 0.004}; int *checkForNoChanges_SomeSpins; int *checkForNoChanges_AllSpins; int *checkForNoChanges_SomeSpins_dev; int *checkForNoChanges_AllSpins_dev; int iterations = k; // Allocate host memory G = (int*)malloc(n*n*sizeof(int)); G_final = (int*)malloc(n*n*sizeof(int)); if(G == NULL || G_final == NULL){ printf("\nERROR: Cannot allocate host memory. 
Aborting...\n"); return 1; } checkForNoChanges_SomeSpins = (int*)malloc(sizeof(int)); checkForNoChanges_AllSpins = (int*)malloc(sizeof(int)); // Allocate device memory HANDLE_ERROR( hipMalloc((void**) &G_dev, n*n*sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**) &H_dev, n*n*sizeof(int)) ); HANDLE_ERROR( hipMalloc((void**) &w_dev, 25*sizeof(double))); HANDLE_ERROR( hipMalloc((void**) &checkForNoChanges_SomeSpins_dev, sizeof(int) )); HANDLE_ERROR( hipMalloc((void**) &checkForNoChanges_AllSpins_dev, sizeof(int) )); // Write to host memory /* Assign random values to G) */ int spin[] = {-1, 1}; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = spin[rand()%2]; /* Assign values to checking variables */ checkForNoChanges_SomeSpins[0] = 0; checkForNoChanges_AllSpins[0] = 0; // Copy host memory to device memory HANDLE_ERROR( hipMemcpy(G_dev, G, n*n*sizeof(int), hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMemcpy(w_dev, w, 25*sizeof(double), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); printf("\nComputing...\n"); // Capture start time hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); HANDLE_ERROR( hipEventRecord( start, 0 ) ); // Set kernel dimesions dim3 ising_Grid((int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM)), (int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM))); dim3 ising_Block(32, 32); dim3 checkForNoChanges_SomeSpins_Grid(2, 2); dim3 checkForNoChanges_Block(32, 32); dim3 checkForNoChanges_AllSpins_Grid((int) ceil(((double)N)/32.0), (int) ceil(((double)N)/32.0)); // Execute kernels on the device for(int q = 0; q < k; q++){ if( q%2 == 0){ hipLaunchKernelGGL(( ising), dim3(ising_Grid), dim3(ising_Block) , 0, 0, G_dev, H_dev, w_dev, n); // Check if no changes are made hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_SomeSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_AllSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ hipLaunchKernelGGL(( ising), dim3(ising_Grid), dim3(ising_Block) , 0, 0, H_dev, G_dev, w_dev, n); // Check if no changes are made hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_SomeSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ 
hipLaunchKernelGGL(( checkForNoChanges), dim3(checkForNoChanges_AllSpins_Grid), dim3(checkForNoChanges_Block), 0, 0, G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), hipMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), hipMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( hipMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), hipMemcpyHostToDevice)); } } } // Write GPU results to host memory if( k%2 == 1) HANDLE_ERROR( hipMemcpy(G_final, H_dev, n*n*sizeof(int), hipMemcpyDeviceToHost) ); else HANDLE_ERROR( hipMemcpy(G_final, G_dev, n*n*sizeof(int), hipMemcpyDeviceToHost) ); // Capture end time HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf( "\nTime used for parallel call: %3.3f sec\n", elapsedTime*0.001 ); // Free device memory HANDLE_ERROR( hipFree(G_dev) ); HANDLE_ERROR( hipFree(H_dev) ); HANDLE_ERROR( hipFree(checkForNoChanges_SomeSpins_dev)); HANDLE_ERROR( hipFree(checkForNoChanges_AllSpins_dev)); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); // Validate results validate(G, G_final, w, iterations, n); // Free host memory free(G); free(G_final); free(checkForNoChanges_SomeSpins); free(checkForNoChanges_AllSpins); return 0; } void validate(int *G, int *G_final, double *w, int k, int n){ printf("\nValidating...\n"); int counter = 0; clock_t start, end; double time_used; start = clock(); // Run sequential code ising_sequential(G, w, k, n); end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\nTime used for sequential call: %3.3f sec\n",time_used); // Validate for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ if(G[i*n + j] != G_final[i*n + j]){ printf("\nWRONG"); printf("\n%d %d",i, j); printf("\n%d %d\n",G[i*n + j],G_final[i*n + j]); counter++; } } } if(counter == 0) printf("\nValidation: CORRECT\n"); else { printf("\nValidation: Wrong\n"); printf("\n%d wrong values\n",counter); } } void ising_sequential( int *G, double *w, int k, int n){ int *H, *temp; double influence = 0.0; H = (int*)malloc(n*n*sizeof(int)); if(H == NULL){ printf("ERROR: Cannot allocate memory for H. Aborting..."); } for(int q = 0; q < k; q++){ for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((i - 2 + x + n)%n)*n + ((j - 2 + y + n)%n)]; } } H[i*n + j] = G[i*n + j]; if(influence > 0.000000001){ H[i*n + j] = 1; } else if(influence < -0.000000001) { H[i*n + j] = -1; } influence = 0.0; } } temp = G; G = H; H = temp; } if(k%2 == 1){ temp = G; G = H; H = temp; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = H[i*n + j]; } }
ba921ddf30f896356395ddff99681b97ac95f3d1.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "ising_cuda.h" #define N 700 #define K 200 #define SPINS_PER_THREAD_DIM 3 #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } __global__ void checkForNoChanges(int *G, int *H, int *checkForNoChanges, int n){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if( (i < n) && (j < n)){ if( G[i*n + j] != H[i*n + j] ){ checkForNoChanges[0] = 1; } } } __global__ void ising(int *G, int *H, double *w, int n){ int i_start = (threadIdx.x + blockIdx.x * blockDim.x)*SPINS_PER_THREAD_DIM; int j_start = (threadIdx.y + blockIdx.y * blockDim.y)*SPINS_PER_THREAD_DIM; if( (i_start < n) && (j_start < n)){ int i_end = i_start + SPINS_PER_THREAD_DIM; int j_end = j_start + SPINS_PER_THREAD_DIM; if(i_end > n) i_end = n; if(j_end > n) j_end = n; double influence = 0.0; for(int k = i_start; k < i_end; k++){ for(int l = j_start; l < j_end; l++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((k - 2 + x + n)%n)*n + ((l - 2 + y + n)%n)]; } } H[k*n + l] = G[k*n + l]; if(influence > 0.000000001){ H[k*n + l] = 1; } else if(influence < -0.000000001) { H[k*n + l] = -1; } influence = 0.0; } } } } int main(int argc, char** argv){ // Declare all variables int n = 0; int k = 0; if (argc != 3) { n = N; k = K; } else { n = atoi(argv[1]); k = atoi(argv[2]); printf("Input n=%d k=%d", n, k); } // Check if SPINS_PER_THREAD_DIM is less than 3 if( SPINS_PER_THREAD_DIM > 3){ printf("\nERROR: SPINS_PER_THREAD_DIM must be less than 3. Aborting...\n"); return -1; } int *G, *G_final, *G_dev, *H_dev; double *w_dev; double w[25] = {0.004 , 0.016 , 0.026 , 0.016 , 0.004 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.026 , 0.117 , 0 , 0.117 , 0.026 , 0.016 , 0.071 , 0.117 , 0.071 , 0.016 , 0.004 , 0.016 , 0.026 , 0.016 , 0.004}; int *checkForNoChanges_SomeSpins; int *checkForNoChanges_AllSpins; int *checkForNoChanges_SomeSpins_dev; int *checkForNoChanges_AllSpins_dev; int iterations = k; // Allocate host memory G = (int*)malloc(n*n*sizeof(int)); G_final = (int*)malloc(n*n*sizeof(int)); if(G == NULL || G_final == NULL){ printf("\nERROR: Cannot allocate host memory. 
Aborting...\n"); return 1; } checkForNoChanges_SomeSpins = (int*)malloc(sizeof(int)); checkForNoChanges_AllSpins = (int*)malloc(sizeof(int)); // Allocate device memory HANDLE_ERROR( cudaMalloc((void**) &G_dev, n*n*sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**) &H_dev, n*n*sizeof(int)) ); HANDLE_ERROR( cudaMalloc((void**) &w_dev, 25*sizeof(double))); HANDLE_ERROR( cudaMalloc((void**) &checkForNoChanges_SomeSpins_dev, sizeof(int) )); HANDLE_ERROR( cudaMalloc((void**) &checkForNoChanges_AllSpins_dev, sizeof(int) )); // Write to host memory /* Assign random values to G) */ int spin[] = {-1, 1}; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = spin[rand()%2]; /* Assign values to checking variables */ checkForNoChanges_SomeSpins[0] = 0; checkForNoChanges_AllSpins[0] = 0; // Copy host memory to device memory HANDLE_ERROR( cudaMemcpy(G_dev, G, n*n*sizeof(int), cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMemcpy(w_dev, w, 25*sizeof(double), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); printf("\nComputing...\n"); // Capture start time cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // Set kernel dimesions dim3 ising_Grid((int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM)), (int) ceil((double)N/(double)(32*SPINS_PER_THREAD_DIM))); dim3 ising_Block(32, 32); dim3 checkForNoChanges_SomeSpins_Grid(2, 2); dim3 checkForNoChanges_Block(32, 32); dim3 checkForNoChanges_AllSpins_Grid((int) ceil(((double)N)/32.0), (int) ceil(((double)N)/32.0)); // Execute kernels on the device for(int q = 0; q < k; q++){ if( q%2 == 0){ ising<<< ising_Grid, ising_Block >>>(G_dev, H_dev, w_dev, n); // Check if no changes are made checkForNoChanges<<< checkForNoChanges_SomeSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ checkForNoChanges<<< checkForNoChanges_AllSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ ising<<< ising_Grid, ising_Block >>>(H_dev, G_dev, w_dev, n); // Check if no changes are made checkForNoChanges<<< checkForNoChanges_SomeSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_SomeSpins_dev, n); HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins, checkForNoChanges_SomeSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if(checkForNoChanges_SomeSpins[0] == 0){ checkForNoChanges<<< checkForNoChanges_AllSpins_Grid, checkForNoChanges_Block>>>(G_dev, H_dev, checkForNoChanges_AllSpins_dev, n); HANDLE_ERROR( 
cudaMemcpy(checkForNoChanges_AllSpins, checkForNoChanges_AllSpins_dev, sizeof(int), cudaMemcpyDeviceToHost) ); if( checkForNoChanges_AllSpins[0] == 0){ printf("\nNo changes: %d iterations\n", q); iterations = q; break; } else{ checkForNoChanges_AllSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_AllSpins_dev, checkForNoChanges_AllSpins, sizeof(int), cudaMemcpyHostToDevice)); } } else{ checkForNoChanges_SomeSpins[0] = 0; HANDLE_ERROR( cudaMemcpy(checkForNoChanges_SomeSpins_dev, checkForNoChanges_SomeSpins, sizeof(int), cudaMemcpyHostToDevice)); } } } // Write GPU results to host memory if( k%2 == 1) HANDLE_ERROR( cudaMemcpy(G_final, H_dev, n*n*sizeof(int), cudaMemcpyDeviceToHost) ); else HANDLE_ERROR( cudaMemcpy(G_final, G_dev, n*n*sizeof(int), cudaMemcpyDeviceToHost) ); // Capture end time HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "\nTime used for parallel call: %3.3f sec\n", elapsedTime*0.001 ); // Free device memory HANDLE_ERROR( cudaFree(G_dev) ); HANDLE_ERROR( cudaFree(H_dev) ); HANDLE_ERROR( cudaFree(checkForNoChanges_SomeSpins_dev)); HANDLE_ERROR( cudaFree(checkForNoChanges_AllSpins_dev)); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); // Validate results validate(G, G_final, w, iterations, n); // Free host memory free(G); free(G_final); free(checkForNoChanges_SomeSpins); free(checkForNoChanges_AllSpins); return 0; } void validate(int *G, int *G_final, double *w, int k, int n){ printf("\nValidating...\n"); int counter = 0; clock_t start, end; double time_used; start = clock(); // Run sequential code ising_sequential(G, w, k, n); end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\nTime used for sequential call: %3.3f sec\n",time_used); // Validate for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ if(G[i*n + j] != G_final[i*n + j]){ printf("\nWRONG"); printf("\n%d %d",i, j); printf("\n%d %d\n",G[i*n + j],G_final[i*n + j]); counter++; } } } if(counter == 0) printf("\nValidation: CORRECT\n"); else { printf("\nValidation: Wrong\n"); printf("\n%d wrong values\n",counter); } } void ising_sequential( int *G, double *w, int k, int n){ int *H, *temp; double influence = 0.0; H = (int*)malloc(n*n*sizeof(int)); if(H == NULL){ printf("ERROR: Cannot allocate memory for H. Aborting..."); } for(int q = 0; q < k; q++){ for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++){ for(int x = 0; x < 5; x++){ for(int y = 0; y < 5; y++){ influence += w[x*5 + y]*G[((i - 2 + x + n)%n)*n + ((j - 2 + y + n)%n)]; } } H[i*n + j] = G[i*n + j]; if(influence > 0.000000001){ H[i*n + j] = 1; } else if(influence < -0.000000001) { H[i*n + j] = -1; } influence = 0.0; } } temp = G; G = H; H = temp; } if(k%2 == 1){ temp = G; G = H; H = temp; for(int i = 0; i < n; i++) for(int j = 0; j < n; j++) G[i*n + j] = H[i*n + j]; } }
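The most visible difference between the .cu file above and its .hip counterpart is the kernel-launch syntax; the rest is a one-to-one cuda*-to-hip* renaming. Below is a minimal, self-contained HIP sketch of that launch mapping; the file name launch_demo.hip, the scale kernel, and all variable names are invented for illustration and are not part of either file.

// launch_demo.hip (hypothetical): CUDA's  scale<<<grid, block>>>(d, n);
// becomes  hipLaunchKernelGGL(scale, grid, block, sharedMemBytes, stream, d, n);
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale(int *v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= 2;
}

int main(void)
{
    const int n = 256;
    int h[256], *d;
    for (int i = 0; i < n; i++) h[i] = i;
    hipMalloc((void**)&d, n * sizeof(int));
    hipMemcpy(d, h, n * sizeof(int), hipMemcpyHostToDevice);
    // one block of 256 threads, 0 bytes of dynamic shared memory, default stream
    hipLaunchKernelGGL(scale, dim3(1), dim3(256), 0, 0, d, n);
    hipMemcpy(h, d, n * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d);
    printf("h[3] = %d\n", h[3]);   // expect 6
    return 0;
}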
400a574c6a0a95bd02f877fbfc8b4aaf2aa3c957.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012-2013 Indian Institute of Technology Kanpur. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY INDIAN INSTITUTE OF TECHNOLOGY KANPUR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INDIAN INSTITUTE OF TECHNOLOGY KANPUR OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Indian Institute of Technology Kanpur. */ /********************************************************************************** Lock-free skip list for CUDA; tested for CUDA 4.2 on 32-bit Ubuntu 10.10 and 64-bit Ubuntu 12.04. Developed at IIT Kanpur. Inputs: Percentage of add and delete operations (e.g., 30 50 for 30% add and 50% delete) Output: Prints the total time (in milliseconds) to execute the the sequence of operations Compilation flags: -O3 -arch sm_20 -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -DNUM_ITEMS=num_ops -DFACTOR=num_ops_per_thread -DKEYS=num_keys NUM_ITEMS is the total number of operations (mix of add, delete, search) to execute. FACTOR is the number of operations per thread. KEYS is the number of integer keys assumed in the range [10, 9+KEYS]. The paper cited below states that the key range is [0, KEYS-1]. However, we have shifted the range by +10 so that the head sentinel key (the minimum key) can be chosen as zero. Any positive shift other than +10 would also work. The include path ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ is needed for cutil.h. Related work: Prabhakar Misra and Mainak Chaudhuri. Performance Evaluation of Concurrent Lock-free Data Structures on GPUs. In Proceedings of the 18th IEEE International Conference on Parallel and Distributed Systems, December 2012. 
***************************************************************************************/ #include"cutil.h" // Comment this if cutil.h is not available #include"cuda_runtime.h" #include"stdio.h" #include"stdlib.h" #include"time.h" #include"assert.h" #if __WORDSIZE == 64 typedef unsigned long long LL; #else typedef unsigned int LL; #endif // Maximum level of a node in the skip list #define MAX_LEVEL 32 // Number of threads per block #define NUM_THREADS 512 // Supported operations #define ADD (0) #define DELETE (1) #define SEARCH (2) class Node; // Definition of generic node class class __attribute__((aligned (16))) Node { public: int topLevel; // Level of the node LL key; // Key value LL next[MAX_LEVEL+1]; // Array of next links // Create a next field from a reference and mark bit __device__ __host__ LL CreateRef(Node* ref, bool mark) { LL val=(LL)ref; val=val|mark; return val; } __device__ __host__ void SetRef(int index, Node* ref, bool mark) { next[index]=CreateRef(ref, mark); } // Extract the reference from a next field __device__ Node* GetReference(int index) { LL ref=next[index]; return (Node*)((ref>>1)<<1); } // Extract the reference and mark bit from a next field __device__ Node* Get(int index, bool* marked) { marked[0]=next[index]%2; return (Node*)((next[index]>>1)<<1); } // CompareAndSet wrapper __device__ bool CompareAndSet(int index, Node* expectedRef, Node* newRef, bool oldMark, bool newMark) { LL oldVal = (LL)expectedRef|oldMark; LL newVal = (LL)newRef|newMark; LL oldValOut=atomicCAS(&(next[index]), oldVal, newVal); if (oldValOut==oldVal) return true; return false; } // Constructor for sentinel nodes Node(LL k) { key=k; topLevel=MAX_LEVEL; int i; for(i=0;i<MAX_LEVEL+1;i++){ next[i]=CreateRef((Node*)NULL, false); } } }; // Definition of lock-free skip list class LockFreeSkipList { public: Node* head; Node* tail; LockFreeSkipList() { Node* h=new Node(0); #if __WORDSIZE == 64 Node* t=new Node((LL)0xffffffffffffffff); #else Node* t=new Node((LL)0xffffffff); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&head, sizeof(Node))); #else hipMalloc((void**)&head, sizeof(Node)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&tail, sizeof(Node))); #else hipMalloc((void**)&tail, sizeof(Node)); #endif int i; for(i=0;i<h->topLevel+1;i++){ h->SetRef(i, tail, false); } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(head, h, sizeof(Node), hipMemcpyHostToDevice)); #else hipMemcpy(head, h, sizeof(Node), hipMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(tail, t, sizeof(Node), hipMemcpyHostToDevice)); #else hipMemcpy(tail, t, sizeof(Node), hipMemcpyHostToDevice); #endif } __device__ bool find(LL, Node**, Node**); // Helping method __device__ bool Add(LL); __device__ bool Delete(LL); __device__ bool Search(LL); }; __device__ Node** nodes; // Pool of pre-allocated nodes __device__ unsigned int pointerIndex=0; // Index into pool of free nodes __device__ LL* randoms; // Array storing the levels of the nodes in the free pool // Function for creating a new node when requested by an add operation __device__ Node* GetNewNode(LL key) { LL ind=atomicInc(&pointerIndex, NUM_ITEMS); Node* n=nodes[ind]; n->key=key; n->topLevel=randoms[ind]; int i; for(i=0;i<n->topLevel+1;i++){ n->SetRef(i, NULL, false); } return n; } __device__ LockFreeSkipList* l; // The lock-free skip list // Kernel for initializing device memory __global__ void init(LockFreeSkipList* l1, Node** n, LL* rands) { randoms=rands; nodes=n; l=l1; } // Find the window holding key // On the way clean up logically 
deleted nodes (those with set marked bit) __device__ bool LockFreeSkipList::find(LL key, Node** preds, Node** succs) { // preds and succs are arrays of pointers int bottomLevel=0; bool marked[]={false}; bool snip; Node* pred=NULL; Node* curr=NULL; Node* succ=NULL; bool beenThereDoneThat; while(true){ beenThereDoneThat = false; pred=head; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, marked); while(marked[0]){ snip=pred->CompareAndSet(level, curr, succ, false, false); beenThereDoneThat = true; if(!snip) break; curr=pred->GetReference(level); succ=curr->Get(level, marked); beenThereDoneThat = false; } if (beenThereDoneThat && !snip) break; if(curr->key<key){ pred=curr; curr=succ; } else{ break; } } if (beenThereDoneThat && !snip) break; preds[level]=pred; succs[level]=curr; } if (beenThereDoneThat && !snip) continue; return((curr->key==key)); } } __device__ bool LockFreeSkipList::Search(LL key) { int bottomLevel=0; bool marked=false; Node* pred=head; Node* curr=NULL; Node* succ=NULL; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, &marked); while(marked){ curr=curr->GetReference(level); succ=curr->Get(level, &marked); } if(curr->key<key){ pred=curr; curr=succ; } else{ break; } } } return(curr->key==key); } __device__ bool LockFreeSkipList::Delete(LL key) { int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; Node* succ; bool marked[]={false}; while(true){ bool found=find(key, preds, succs); if(!found){ return false; } else{ Node* nodeToDelete=succs[bottomLevel]; int level; for(level=nodeToDelete->topLevel;level>=bottomLevel+1;level--){ succ=nodeToDelete->Get(level, marked); while(marked[0]==false){ nodeToDelete->CompareAndSet(level, succ, succ, false, true); succ=nodeToDelete->Get(level, marked); } } succ=nodeToDelete->Get(bottomLevel, marked); while(true){ bool iMarkedIt=nodeToDelete->CompareAndSet(bottomLevel, succ, succ, false, true); succ=succs[bottomLevel]->Get(bottomLevel, marked); if(iMarkedIt==true){ find(key, preds, succs); return true; } else if(marked[0]==true){ return false; } } } } } __device__ bool LockFreeSkipList::Add(LL key) { Node* newNode=GetNewNode(key); int topLevel=newNode->topLevel; int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; while(true){ bool found=find(key, preds, succs); if(found){ return false; } else{ int level; for(level=bottomLevel;level<=topLevel;level++){ Node* succ=succs[level]; newNode->SetRef(level, succ, false); } Node* pred=preds[bottomLevel]; Node* succ=succs[bottomLevel]; bool t; t=pred->CompareAndSet(bottomLevel, succ, newNode, false, false); if(!t){ continue; } for(level=bottomLevel+1;level<=topLevel;level++){ while(true){ pred=preds[level]; succ=succs[level]; if(pred->CompareAndSet(level, succ, newNode, false, false)){ break; } find(key, preds, succs); } } return true; } } } __global__ void print() { // For debugging int tid=blockIdx.x*blockDim.x+threadIdx.x; if(tid==0){ Node* p=l->head; bool marked=false; while(p!=NULL){ #if __WORDSIZE == 64 printf("%#llx, %u, marked=%u, address is %p\n", p->key, p->topLevel, marked, p); #else printf("%#x, %u, marked=%u, address is %p\n", p->key, p->topLevel, marked, p); #endif p=p->Get(0, &marked); } printf("\n"); } } // The main kernel __global__ void kernel(LL* items, LL* op, LL* result) { // The array items holds the sequence of keys // The array op holds the sequence of operations // The array result, at the 
end, will hold the outcome of the operations int tid,i; for(i=0;i<FACTOR;i++){ // FACTOR is the number of operations per thread tid=i*gridDim.x*blockDim.x+blockIdx.x*blockDim.x+threadIdx.x; if(tid>=NUM_ITEMS) return; // Grab the operation and the associated key and execute LL item=items[tid]; if(op[tid]==ADD){ result[tid]=l->Add(item); } if(op[tid]==DELETE){ result[tid]=l->Delete(item); } if(op[tid]==SEARCH){ result[tid]=l->Search(item); } } } // Generate the level of a newly created node LL Randomlevel() { LL v=1; double p=0.5; while(((rand()/(double)(RAND_MAX))<p) && (v<MAX_LEVEL)) v++; return v; } int main(int argc, char** argv) { if (argc != 3) { printf("Need two arguments: percent add ops and percent delete ops (e.g., 30 50 for 30%% add and 50%% delete).\nAborting...\n"); exit(1); } // Extract operations ratio int adds=atoi(argv[1]); int deletes=atoi(argv[2]); if (adds+deletes > 100) { printf("Sum of add and delete precentages exceeds 100.\nAborting...\n"); exit(1); } // Allocate necessary arrays LL* op=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* levels=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* items=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* result=(LL*)malloc(sizeof(LL)*NUM_ITEMS); int i; // NUM_ITEMS is the total number of operations to execute srand(0); for(i=0;i<NUM_ITEMS;i++){ items[i]=10+rand()%KEYS; // Keys associated with operations } // Pre-generated levels of skip list nodes (relevant only if op[i] is add) srand(0); for(i=0;i<NUM_ITEMS;i++){ levels[i]=Randomlevel()-1; } // Populate the sequence of operations for(i=0;i<(NUM_ITEMS*adds)/100;i++){ op[i]=ADD; } for(;i<(NUM_ITEMS*(adds+deletes))/100;i++){ op[i]=DELETE; } for(;i<NUM_ITEMS;i++){ op[i]=SEARCH; } adds=(NUM_ITEMS*adds)/100; // Allocate device memory LL* Citems; LL* Cop; LL* Cresult; LL* Clevels; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(hipMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(hipMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); #else hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS); hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS); hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS); hipMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS); hipMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); hipMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); #endif Node** pointers=(Node**)malloc(sizeof(LL)*adds); Node** Cpointers; // Allocate the pool of free nodes for(i=0;i<adds;i++){ #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&pointers[i], sizeof(Node))); #else hipMalloc((void**)&pointers[i], sizeof(Node)); #endif } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cpointers, sizeof(Node*)*adds)); CUDA_SAFE_CALL(hipMemcpy(Cpointers,pointers, sizeof(Node*)*adds, hipMemcpyHostToDevice)); #else hipMalloc((void**)&Cpointers, sizeof(Node*)*adds); hipMemcpy(Cpointers,pointers, sizeof(Node*)*adds, hipMemcpyHostToDevice); #endif // Allocate the skip list LockFreeSkipList* Clist; LockFreeSkipList* list=new LockFreeSkipList(); #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Clist, sizeof(LockFreeSkipList))); CUDA_SAFE_CALL(hipMemcpy(Clist, list, 
sizeof(LockFreeSkipList), hipMemcpyHostToDevice)); #else hipMalloc((void**)&Clist, sizeof(LockFreeSkipList)); hipMemcpy(Clist, list, sizeof(LockFreeSkipList), hipMemcpyHostToDevice); #endif // Calculate the number of thread blocks // NUM_ITEMS = total number of operations to execute // NUM_THREADS = number of threads per block // FACTOR = number of operations per thread int blocks=(NUM_ITEMS%(NUM_THREADS*FACTOR)==0)?NUM_ITEMS/(NUM_THREADS*FACTOR):(NUM_ITEMS/(NUM_THREADS*FACTOR))+1; // Error checking code hipError_t error=hipGetLastError(); if(hipSuccess!=error){ printf("error0:CUDA ERROR (%d) {%s}\n",error,hipGetErrorString(error)); exit(-1); } // Initialize the device memory hipLaunchKernelGGL(( init), dim3(1),dim3(32), 0, 0, Clist, Cpointers, Clevels); hipDeviceSynchronize(); // Launch main kernel hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(NUM_THREADS), 0, 0, Citems, Cop, Cresult); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); // Print kernel execution time in milliseconds printf("%lf\n",time); // Check for errors error=hipGetLastError(); if(hipSuccess!=error){ printf("error1:CUDA ERROR (%d) {%s}\n",error, hipGetErrorString(error)); exit(-1); } // Move results back to host memory #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost)); #else hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost); #endif // Uncomment the following for debugging //print<<<1,32>>>(); hipDeviceSynchronize(); return 0; }
400a574c6a0a95bd02f877fbfc8b4aaf2aa3c957.cu
/* Copyright 2012-2013 Indian Institute of Technology Kanpur. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY INDIAN INSTITUTE OF TECHNOLOGY KANPUR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INDIAN INSTITUTE OF TECHNOLOGY KANPUR OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Indian Institute of Technology Kanpur. */ /********************************************************************************** Lock-free skip list for CUDA; tested for CUDA 4.2 on 32-bit Ubuntu 10.10 and 64-bit Ubuntu 12.04. Developed at IIT Kanpur. Inputs: Percentage of add and delete operations (e.g., 30 50 for 30% add and 50% delete) Output: Prints the total time (in milliseconds) to execute the the sequence of operations Compilation flags: -O3 -arch sm_20 -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -DNUM_ITEMS=num_ops -DFACTOR=num_ops_per_thread -DKEYS=num_keys NUM_ITEMS is the total number of operations (mix of add, delete, search) to execute. FACTOR is the number of operations per thread. KEYS is the number of integer keys assumed in the range [10, 9+KEYS]. The paper cited below states that the key range is [0, KEYS-1]. However, we have shifted the range by +10 so that the head sentinel key (the minimum key) can be chosen as zero. Any positive shift other than +10 would also work. The include path ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ is needed for cutil.h. Related work: Prabhakar Misra and Mainak Chaudhuri. Performance Evaluation of Concurrent Lock-free Data Structures on GPUs. In Proceedings of the 18th IEEE International Conference on Parallel and Distributed Systems, December 2012. 
***************************************************************************************/ #include"cutil.h" // Comment this if cutil.h is not available #include"cuda_runtime.h" #include"stdio.h" #include"stdlib.h" #include"time.h" #include"assert.h" #if __WORDSIZE == 64 typedef unsigned long long LL; #else typedef unsigned int LL; #endif // Maximum level of a node in the skip list #define MAX_LEVEL 32 // Number of threads per block #define NUM_THREADS 512 // Supported operations #define ADD (0) #define DELETE (1) #define SEARCH (2) class Node; // Definition of generic node class class __attribute__((aligned (16))) Node { public: int topLevel; // Level of the node LL key; // Key value LL next[MAX_LEVEL+1]; // Array of next links // Create a next field from a reference and mark bit __device__ __host__ LL CreateRef(Node* ref, bool mark) { LL val=(LL)ref; val=val|mark; return val; } __device__ __host__ void SetRef(int index, Node* ref, bool mark) { next[index]=CreateRef(ref, mark); } // Extract the reference from a next field __device__ Node* GetReference(int index) { LL ref=next[index]; return (Node*)((ref>>1)<<1); } // Extract the reference and mark bit from a next field __device__ Node* Get(int index, bool* marked) { marked[0]=next[index]%2; return (Node*)((next[index]>>1)<<1); } // CompareAndSet wrapper __device__ bool CompareAndSet(int index, Node* expectedRef, Node* newRef, bool oldMark, bool newMark) { LL oldVal = (LL)expectedRef|oldMark; LL newVal = (LL)newRef|newMark; LL oldValOut=atomicCAS(&(next[index]), oldVal, newVal); if (oldValOut==oldVal) return true; return false; } // Constructor for sentinel nodes Node(LL k) { key=k; topLevel=MAX_LEVEL; int i; for(i=0;i<MAX_LEVEL+1;i++){ next[i]=CreateRef((Node*)NULL, false); } } }; // Definition of lock-free skip list class LockFreeSkipList { public: Node* head; Node* tail; LockFreeSkipList() { Node* h=new Node(0); #if __WORDSIZE == 64 Node* t=new Node((LL)0xffffffffffffffff); #else Node* t=new Node((LL)0xffffffff); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&head, sizeof(Node))); #else cudaMalloc((void**)&head, sizeof(Node)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&tail, sizeof(Node))); #else cudaMalloc((void**)&tail, sizeof(Node)); #endif int i; for(i=0;i<h->topLevel+1;i++){ h->SetRef(i, tail, false); } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(head, h, sizeof(Node), cudaMemcpyHostToDevice)); #else cudaMemcpy(head, h, sizeof(Node), cudaMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(tail, t, sizeof(Node), cudaMemcpyHostToDevice)); #else cudaMemcpy(tail, t, sizeof(Node), cudaMemcpyHostToDevice); #endif } __device__ bool find(LL, Node**, Node**); // Helping method __device__ bool Add(LL); __device__ bool Delete(LL); __device__ bool Search(LL); }; __device__ Node** nodes; // Pool of pre-allocated nodes __device__ unsigned int pointerIndex=0; // Index into pool of free nodes __device__ LL* randoms; // Array storing the levels of the nodes in the free pool // Function for creating a new node when requested by an add operation __device__ Node* GetNewNode(LL key) { LL ind=atomicInc(&pointerIndex, NUM_ITEMS); Node* n=nodes[ind]; n->key=key; n->topLevel=randoms[ind]; int i; for(i=0;i<n->topLevel+1;i++){ n->SetRef(i, NULL, false); } return n; } __device__ LockFreeSkipList* l; // The lock-free skip list // Kernel for initializing device memory __global__ void init(LockFreeSkipList* l1, Node** n, LL* rands) { randoms=rands; nodes=n; l=l1; } // Find the window holding key // On the way clean up 
logically deleted nodes (those with set marked bit) __device__ bool LockFreeSkipList::find(LL key, Node** preds, Node** succs) { // preds and succs are arrays of pointers int bottomLevel=0; bool marked[]={false}; bool snip; Node* pred=NULL; Node* curr=NULL; Node* succ=NULL; bool beenThereDoneThat; while(true){ beenThereDoneThat = false; pred=head; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, marked); while(marked[0]){ snip=pred->CompareAndSet(level, curr, succ, false, false); beenThereDoneThat = true; if(!snip) break; curr=pred->GetReference(level); succ=curr->Get(level, marked); beenThereDoneThat = false; } if (beenThereDoneThat && !snip) break; if(curr->key<key){ pred=curr; curr=succ; } else{ break; } } if (beenThereDoneThat && !snip) break; preds[level]=pred; succs[level]=curr; } if (beenThereDoneThat && !snip) continue; return((curr->key==key)); } } __device__ bool LockFreeSkipList::Search(LL key) { int bottomLevel=0; bool marked=false; Node* pred=head; Node* curr=NULL; Node* succ=NULL; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, &marked); while(marked){ curr=curr->GetReference(level); succ=curr->Get(level, &marked); } if(curr->key<key){ pred=curr; curr=succ; } else{ break; } } } return(curr->key==key); } __device__ bool LockFreeSkipList::Delete(LL key) { int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; Node* succ; bool marked[]={false}; while(true){ bool found=find(key, preds, succs); if(!found){ return false; } else{ Node* nodeToDelete=succs[bottomLevel]; int level; for(level=nodeToDelete->topLevel;level>=bottomLevel+1;level--){ succ=nodeToDelete->Get(level, marked); while(marked[0]==false){ nodeToDelete->CompareAndSet(level, succ, succ, false, true); succ=nodeToDelete->Get(level, marked); } } succ=nodeToDelete->Get(bottomLevel, marked); while(true){ bool iMarkedIt=nodeToDelete->CompareAndSet(bottomLevel, succ, succ, false, true); succ=succs[bottomLevel]->Get(bottomLevel, marked); if(iMarkedIt==true){ find(key, preds, succs); return true; } else if(marked[0]==true){ return false; } } } } } __device__ bool LockFreeSkipList::Add(LL key) { Node* newNode=GetNewNode(key); int topLevel=newNode->topLevel; int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; while(true){ bool found=find(key, preds, succs); if(found){ return false; } else{ int level; for(level=bottomLevel;level<=topLevel;level++){ Node* succ=succs[level]; newNode->SetRef(level, succ, false); } Node* pred=preds[bottomLevel]; Node* succ=succs[bottomLevel]; bool t; t=pred->CompareAndSet(bottomLevel, succ, newNode, false, false); if(!t){ continue; } for(level=bottomLevel+1;level<=topLevel;level++){ while(true){ pred=preds[level]; succ=succs[level]; if(pred->CompareAndSet(level, succ, newNode, false, false)){ break; } find(key, preds, succs); } } return true; } } } __global__ void print() { // For debugging int tid=blockIdx.x*blockDim.x+threadIdx.x; if(tid==0){ Node* p=l->head; bool marked=false; while(p!=NULL){ #if __WORDSIZE == 64 printf("%#llx, %u, marked=%u, address is %p\n", p->key, p->topLevel, marked, p); #else printf("%#x, %u, marked=%u, address is %p\n", p->key, p->topLevel, marked, p); #endif p=p->Get(0, &marked); } printf("\n"); } } // The main kernel __global__ void kernel(LL* items, LL* op, LL* result) { // The array items holds the sequence of keys // The array op holds the sequence of operations // The array 
result, at the end, will hold the outcome of the operations int tid,i; for(i=0;i<FACTOR;i++){ // FACTOR is the number of operations per thread tid=i*gridDim.x*blockDim.x+blockIdx.x*blockDim.x+threadIdx.x; if(tid>=NUM_ITEMS) return; // Grab the operation and the associated key and execute LL item=items[tid]; if(op[tid]==ADD){ result[tid]=l->Add(item); } if(op[tid]==DELETE){ result[tid]=l->Delete(item); } if(op[tid]==SEARCH){ result[tid]=l->Search(item); } } } // Generate the level of a newly created node LL Randomlevel() { LL v=1; double p=0.5; while(((rand()/(double)(RAND_MAX))<p) && (v<MAX_LEVEL)) v++; return v; } int main(int argc, char** argv) { if (argc != 3) { printf("Need two arguments: percent add ops and percent delete ops (e.g., 30 50 for 30%% add and 50%% delete).\nAborting...\n"); exit(1); } // Extract operations ratio int adds=atoi(argv[1]); int deletes=atoi(argv[2]); if (adds+deletes > 100) { printf("Sum of add and delete precentages exceeds 100.\nAborting...\n"); exit(1); } // Allocate necessary arrays LL* op=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* levels=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* items=(LL*)malloc(sizeof(LL)*NUM_ITEMS); LL* result=(LL*)malloc(sizeof(LL)*NUM_ITEMS); int i; // NUM_ITEMS is the total number of operations to execute srand(0); for(i=0;i<NUM_ITEMS;i++){ items[i]=10+rand()%KEYS; // Keys associated with operations } // Pre-generated levels of skip list nodes (relevant only if op[i] is add) srand(0); for(i=0;i<NUM_ITEMS;i++){ levels[i]=Randomlevel()-1; } // Populate the sequence of operations for(i=0;i<(NUM_ITEMS*adds)/100;i++){ op[i]=ADD; } for(;i<(NUM_ITEMS*(adds+deletes))/100;i++){ op[i]=DELETE; } for(;i<NUM_ITEMS;i++){ op[i]=SEARCH; } adds=(NUM_ITEMS*adds)/100; // Allocate device memory LL* Citems; LL* Cop; LL* Cresult; LL* Clevels; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(cudaMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS)); CUDA_SAFE_CALL(cudaMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); #else cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS); cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS); cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS); cudaMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS); cudaMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); cudaMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); #endif Node** pointers=(Node**)malloc(sizeof(LL)*adds); Node** Cpointers; // Allocate the pool of free nodes for(i=0;i<adds;i++){ #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&pointers[i], sizeof(Node))); #else cudaMalloc((void**)&pointers[i], sizeof(Node)); #endif } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cpointers, sizeof(Node*)*adds)); CUDA_SAFE_CALL(cudaMemcpy(Cpointers,pointers, sizeof(Node*)*adds, cudaMemcpyHostToDevice)); #else cudaMalloc((void**)&Cpointers, sizeof(Node*)*adds); cudaMemcpy(Cpointers,pointers, sizeof(Node*)*adds, cudaMemcpyHostToDevice); #endif // Allocate the skip list LockFreeSkipList* Clist; LockFreeSkipList* list=new LockFreeSkipList(); #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Clist, sizeof(LockFreeSkipList))); 
CUDA_SAFE_CALL(cudaMemcpy(Clist, list, sizeof(LockFreeSkipList), cudaMemcpyHostToDevice)); #else cudaMalloc((void**)&Clist, sizeof(LockFreeSkipList)); cudaMemcpy(Clist, list, sizeof(LockFreeSkipList), cudaMemcpyHostToDevice); #endif // Calculate the number of thread blocks // NUM_ITEMS = total number of operations to execute // NUM_THREADS = number of threads per block // FACTOR = number of operations per thread int blocks=(NUM_ITEMS%(NUM_THREADS*FACTOR)==0)?NUM_ITEMS/(NUM_THREADS*FACTOR):(NUM_ITEMS/(NUM_THREADS*FACTOR))+1; // Error checking code cudaError_t error=cudaGetLastError(); if(cudaSuccess!=error){ printf("error0:CUDA ERROR (%d) {%s}\n",error,cudaGetErrorString(error)); exit(-1); } // Initialize the device memory init<<<1,32>>>(Clist, Cpointers, Clevels); cudaThreadSynchronize(); // Launch main kernel cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); kernel<<<blocks,NUM_THREADS>>>(Citems, Cop, Cresult); cudaThreadSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); // Print kernel execution time in milliseconds printf("%lf\n",time); // Check for errors error=cudaGetLastError(); if(cudaSuccess!=error){ printf("error1:CUDA ERROR (%d) {%s}\n",error, cudaGetErrorString(error)); exit(-1); } // Move results back to host memory #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost)); #else cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost); #endif // Uncomment the following for debugging //print<<<1,32>>>(); cudaThreadSynchronize(); return 0; }
b7ec88843a020b97b5a3c6fe0e1a8de23b6565c5.hip
// !!! This is a file automatically generated by hipify!!!
#include "page_rank.h"
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <cusp/format.h>
#include <cusp/exception.h>
#include <thrust/sort.h>
#include <thrust/count.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <sstream>

using namespace std;

void read_matrix(CAST(COO) &temp, const char *fname)
{
    cusp::io::read_matrix_market_file(temp, fname);
    // cerr << "Read the matrix\n";
}

void normalize(CAST(COO) &adj, CAST(ARR1D) &dangling)
{
    CAST(ARR1D) ones(adj.num_rows, 1);
    CAST(ARR1D) sum = ones;
    cusp::detail::timer t;

    cusp::multiply(adj, ones, sum);
    // cerr << "Row sum calculated.\n";

    /* instantiated an inversion_op (invert) for use in transform */
    t.start();
    inversion_op<double> invert = inversion_op<double>();
    thrust::transform(sum.begin(), sum.end(), sum.begin(), invert);
    // cerr << "Inversion done.\n";

    dangling_op<double> dangle = dangling_op<double>();
    thrust::transform(sum.begin(), sum.end(), dangling.begin(), dangle);
    // cerr << "Dangling nodes found.\n";

    CAST(COO) link_mat = adj;
    //cerr << "ADJ-row: " << adj.row_indices.size() << endl;
    //cerr << "ADJ-col: " << adj.column_indices.size() << endl;
    //cerr << "link-row: " << link_mat.row_indices.size() << endl;
    //cerr << "link-col: " << link_mat.column_indices.size() << endl;
    //is_valid_matrix(adj);
    cusp::transpose(adj, link_mat);
    //cerr << "LINK_MAT=======" << is_valid(link_mat) << endl;
    adj = link_mat;
    // cerr << "Transpose calculated.\n";
    //cerr << "L_ROW: " << adj.num_rows << endl;
    //cerr << "L_COL: " << adj.num_cols << endl;
    //cerr << "L_NNZ: " << adj.num_entries << endl;

    CAST(COO) dia(adj.num_rows, adj.num_rows, adj.num_rows);
    thrust::sequence(dia.row_indices.begin(), dia.row_indices.end());
    thrust::sequence(dia.column_indices.begin(), dia.column_indices.end());
    thrust::copy(sum.begin(), sum.end(), dia.values.begin());
    // cerr << "Diagonal Matrix Formed.\n";

    if(cusp::is_valid_matrix(adj)) {
        cusp::multiply(adj, dia, link_mat); // link_mat = adj * dia
        adj = link_mat;
    } else {
        cout << "Invalid format!" << endl;
        exit(1);
    }
    // cerr << "Normalized\n";
    // cerr << "TIME:NORMAL: " << t.milliseconds_elapsed() << endl;
}

void print_array(CAST(ARR1D) rank)
{
    for (int i = 0; i < rank.size(); i++)
        cout << setprecision(10) << rank[i] << endl;
}

void pagerank(CAST(COO) &link, double beta, CAST(ARR1D) &rank, CAST(ARR1D) &dangling)
{
    int V = link.num_rows;
    double beta_V = beta / V;
    double one_minus_beta = (1 - beta) / V;
    cusp::detail::timer t;

    CAST(ARR1D) teleport(V, one_minus_beta);
    CAST(ARR1D) temp(V), prev_rank(V, 0);

    t.start();
    cusp::blas::fill(rank, 1 / (double) V);

    if(!is_valid_matrix(link)) {
        cout << "Link: Invalid format!" << endl;
        return;
    }

    do {
        // temp = link * rank
        multiply(link, rank, temp);

        // rank = temp * beta + dangling * beta_V + teleport * 1
        cusp::blas::axpbypcz(temp, dangling, teleport, rank, beta, beta_V, 1);
#ifndef CPU
        hipDeviceSynchronize();
#endif

        // tolerance check
        cusp::blas::axpy(rank, prev_rank, -1);
        check_above_threshold<double> check = check_above_threshold<double>();
        if(thrust::count_if(prev_rank.begin(), prev_rank.end(), check) == 0)
            break;
        prev_rank = rank;
    } while(1);
    // cerr << "TIME:PR: " << t.milliseconds_elapsed() << endl;
}
b7ec88843a020b97b5a3c6fe0e1a8de23b6565c5.cu
#include "page_rank.h" #include <stdlib.h> #include <iostream> #include <iomanip> #include <cusp/format.h> #include <cusp/exception.h> #include <thrust/sort.h> #include <thrust/count.h> #include <thrust/extrema.h> #include <thrust/functional.h> #include <sstream> using namespace std; void read_matrix(CAST(COO) &temp, const char *fname) { cusp::io::read_matrix_market_file(temp, fname); // cerr << "Read the matrix\n"; } void normalize(CAST(COO) &adj, CAST(ARR1D) &dangling) { CAST(ARR1D) ones(adj.num_rows, 1); CAST(ARR1D) sum = ones; cusp::detail::timer t; cusp::multiply(adj, ones, sum); // cerr << "Row sum calculated.\n"; /* instantiated an inversion_op (invert) for use in transform */ t.start(); inversion_op<double> invert = inversion_op<double>(); thrust::transform(sum.begin(), sum.end(), sum.begin(), invert); // cerr << "Inversion done.\n"; dangling_op<double> dangle = dangling_op<double>(); thrust::transform(sum.begin(), sum.end(), dangling.begin(), dangle); // cerr << "Dangling nodes found.\n"; CAST(COO) link_mat = adj; //cerr << "ADJ-row: " << adj.row_indices.size() << endl; //cerr << "ADJ-col: " << adj.column_indices.size() << endl; //cerr << "link-row: " << link_mat.row_indices.size() << endl; //cerr << "link-col: " << link_mat.column_indices.size() << endl; //is_valid_matrix(adj); cusp::transpose(adj, link_mat); //cerr << "LINK_MAT=======" << is_valid(link_mat) << endl; adj = link_mat; // cerr << "Transpose calculated.\n"; //cerr << "L_ROW: " << adj.num_rows << endl; //cerr << "L_COL: " << adj.num_cols << endl; //cerr << "L_NNZ: " << adj.num_entries << endl; CAST(COO) dia(adj.num_rows, adj.num_rows, adj.num_rows); thrust::sequence(dia.row_indices.begin(), dia.row_indices.end()); thrust::sequence(dia.column_indices.begin(), dia.column_indices.end()); thrust::copy(sum.begin(), sum.end(), dia.values.begin()); // cerr << "Diagonal Matrix Formed.\n"; if(cusp::is_valid_matrix(adj)) { cusp::multiply(adj, dia, link_mat); // link_mat = adj * dia adj = link_mat; } else { cout << "Invalid format!" << endl; exit(1); } // cerr << "Normalized\n"; // cerr << "TIME:NORMAL: " << t.milliseconds_elapsed() << endl; } void print_array(CAST(ARR1D) rank) { for (int i = 0; i < rank.size(); i++) cout << setprecision(10) << rank[i] << endl; } void pagerank(CAST(COO) &link, double beta, CAST(ARR1D) &rank, CAST(ARR1D) &dangling) { int V = link.num_rows; double beta_V = beta / V; double one_minus_beta = (1 - beta) / V; cusp::detail::timer t; CAST(ARR1D) teleport(V, one_minus_beta); CAST(ARR1D) temp(V), prev_rank(V, 0); t.start(); cusp::blas::fill(rank, 1 / (double) V); if(!is_valid_matrix(link)) { cout << "Link: Invalid format!" << endl; return; } do { // temp = link * rank multiply(link, rank, temp); // rank = temp * beta + dangling * beta_V + teleport * 1 cusp::blas::axpbypcz(temp, dangling, teleport, rank, beta, beta_V, 1); #ifndef CPU cudaThreadSynchronize(); #endif // tolerance check cusp::blas::axpy(rank, prev_rank, -1); check_above_threshold<double> check = check_above_threshold<double>(); if(thrust::count_if(prev_rank.begin(), prev_rank.end(), check) == 0) break; prev_rank = rank; } while(1); // cerr << "TIME:PR: " << t.milliseconds_elapsed() << endl; }
df5ae2037e735fd22a3dfedb9d2231ab95a66b6a.hip
// !!! This is a file automatically generated by hipify!!!
#include "ModerGpuWrapper.h"
#include "moderngpu\include\util\mgpucontext.h"
#include "moderngpu\include\moderngpu.cuh"
#include <algorithm>
#include <hip/device_functions.h>
#include <helper_math.h>

using namespace mgpu;

namespace modergpu_wrapper
{
    ContextPtr g_context;

    static void create_context()
    {
        if (g_context.get() == nullptr)
            g_context = CreateCudaDevice(0);
    }

    void mergesort(int* val, int count)
    {
        create_context();
        MergesortKeys(val, count, *g_context);
    }

    void mergesort(float* val, int count)
    {
        create_context();
        MergesortKeys(val, count, *g_context);
    }

    void mergesort_by_key(int* keys_global, int* values_global, int count)
    {
        create_context();
        MergesortPairs(keys_global, values_global, count, *g_context);
    }

    void mergesort_by_key(int* keys_global, float* values_global, int count)
    {
        create_context();
        MergesortPairs(keys_global, values_global, count, *g_context);
    }

    void mergesort_by_key(float* keys_global, int* values_global, int count)
    {
        create_context();
        MergesortPairs(keys_global, values_global, count, *g_context);
    }

    void mergesort_by_key(float* keys_global, float* values_global, int count)
    {
        create_context();
        MergesortPairs(keys_global, values_global, count, *g_context);
    }

    void mergesort_by_key(int* keys_global, float4* values_global, int count)
    {
        create_context();
        MergesortPairs(keys_global, values_global, count, *g_context);
    }

    void inclusive_scan_by_key(int* key_d, float4* value_d, float4* dst_d, int n)
    {
    }
}
df5ae2037e735fd22a3dfedb9d2231ab95a66b6a.cu
#include "ModerGpuWrapper.h" #include "moderngpu\include\util\mgpucontext.h" #include "moderngpu\include\moderngpu.cuh" #include <algorithm> #include <device_functions.h> #include <helper_math.h> using namespace mgpu; namespace modergpu_wrapper { ContextPtr g_context; static void create_context() { if (g_context.get() == nullptr) g_context = CreateCudaDevice(0); } void mergesort(int* val, int count) { create_context(); MergesortKeys(val, count, *g_context); } void mergesort(float* val, int count) { create_context(); MergesortKeys(val, count, *g_context); } void mergesort_by_key(int* keys_global, int* values_global, int count) { create_context(); MergesortPairs(keys_global, values_global, count, *g_context); } void mergesort_by_key(int* keys_global, float* values_global, int count) { create_context(); MergesortPairs(keys_global, values_global, count, *g_context); } void mergesort_by_key(float* keys_global, int* values_global, int count) { create_context(); MergesortPairs(keys_global, values_global, count, *g_context); } void mergesort_by_key(float* keys_global, float* values_global, int count) { create_context(); MergesortPairs(keys_global, values_global, count, *g_context); } void mergesort_by_key(int* keys_global, float4* values_global, int count) { create_context(); MergesortPairs(keys_global, values_global, count, *g_context); } void inclusive_scan_by_key(int* key_d, float4* value_d, float4* dst_d, int n) { } }
11474c3ba9f0aca3620e31ebfc04d7525ba81d49.hip
// !!! This is a file automatically generated by hipify!!!
// define DEFINE_GLOBAL to declare all constant variables here
// --> must define it BEFORE including CUAPI.h since the latter will include "Macro.h" to set SET_GLOBAL()
#define DEFINE_GLOBAL
#include "CUAPI.h"
#include "CUDA_ConstMemory.h"
#undef DEFINE_GLOBAL

#ifdef GPU

extern real *d_EoS_Table[EOS_NTABLE_MAX];

#ifdef GRAVITY
void CUAPI_SetConstMemory_ExtAccPot();
#endif


//-------------------------------------------------------------------------------------------------------
// Function    : CUAPI_SetConstMemory
// Description : Set the constant memory variables on GPU
//
// Note        : 1. Adopt the suggested approach for CUDA version >= 5.0
//               2. Invoked by Init_GAMER()
//               3. Invoke CUAPI_SetConstMemory_ExtAccPot()
//               4. Some constant memory variables are set elsewhere. For example,
//                  (1) Source-term variables are set by individual source-term initializer
//                  (2) EoS variables are set by CUAPI_SetConstMemory_EoS()
//
// Parameter   : None
//
// Return      : c_NormIdx[], c_Mp[], c_Mm[], c_ExtAcc_AuxArray[], c_ExtPot_AuxArray[]
//---------------------------------------------------------------------------------------------------
void CUAPI_SetConstMemory()
{

   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );

// hipMemcpyToSymbol() has the default parameters "offset=0, kind=hipMemcpyHostToDevice" and so
// we do not repeat them below

# if ( NCOMP_PASSIVE > 0 )
   CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_NormIdx, PassiveNorm_VarIdx, NCOMP_PASSIVE*sizeof(int) ) );
# endif

# ifdef GRAVITY
   CUAPI_SetConstMemory_ExtAccPot();

   const real h_Mp[3] = { -3.0/32.0, +30.0/32.0, +5.0/32.0 };
   const real h_Mm[3] = { +5.0/32.0, +30.0/32.0, -3.0/32.0 };

   CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_Mp, h_Mp, 3*sizeof(real) ) );
   CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_Mm, h_Mm, 3*sizeof(real) ) );
# endif // #ifdef GRAVITY

   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );

} // FUNCTION : CUAPI_SetConstMemory

#endif // #ifdef GPU
11474c3ba9f0aca3620e31ebfc04d7525ba81d49.cu
// define DEFINE_GLOBAL to declare all constant variables here
// --> must define it BEFORE including CUAPI.h since the latter will include "Macro.h" to set SET_GLOBAL()
#define DEFINE_GLOBAL
#include "CUAPI.h"
#include "CUDA_ConstMemory.h"
#undef DEFINE_GLOBAL

#ifdef GPU

extern real *d_EoS_Table[EOS_NTABLE_MAX];

#ifdef GRAVITY
void CUAPI_SetConstMemory_ExtAccPot();
#endif


//-------------------------------------------------------------------------------------------------------
// Function    : CUAPI_SetConstMemory
// Description : Set the constant memory variables on GPU
//
// Note        : 1. Adopt the suggested approach for CUDA version >= 5.0
//               2. Invoked by Init_GAMER()
//               3. Invoke CUAPI_SetConstMemory_ExtAccPot()
//               4. Some constant memory variables are set elsewhere. For example,
//                  (1) Source-term variables are set by individual source-term initializer
//                  (2) EoS variables are set by CUAPI_SetConstMemory_EoS()
//
// Parameter   : None
//
// Return      : c_NormIdx[], c_Mp[], c_Mm[], c_ExtAcc_AuxArray[], c_ExtPot_AuxArray[]
//---------------------------------------------------------------------------------------------------
void CUAPI_SetConstMemory()
{

   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );

// cudaMemcpyToSymbol() has the default parameters "offset=0, kind=cudaMemcpyHostToDevice" and so
// we do not repeat them below

# if ( NCOMP_PASSIVE > 0 )
   CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_NormIdx, PassiveNorm_VarIdx, NCOMP_PASSIVE*sizeof(int) ) );
# endif

# ifdef GRAVITY
   CUAPI_SetConstMemory_ExtAccPot();

   const real h_Mp[3] = { -3.0/32.0, +30.0/32.0, +5.0/32.0 };
   const real h_Mm[3] = { +5.0/32.0, +30.0/32.0, -3.0/32.0 };

   CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Mp, h_Mp, 3*sizeof(real) ) );
   CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Mm, h_Mm, 3*sizeof(real) ) );
# endif // #ifdef GRAVITY

   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );

} // FUNCTION : CUAPI_SetConstMemory

#endif // #ifdef GPU
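A minimal sketch of the defaulted-argument hipMemcpyToSymbol() call pattern that the comment inside CUAPI_SetConstMemory() above refers to; c_Coeff and the standalone main() are invented for illustration and mirror the call form used in that file rather than any particular GAMER symbol.

#include <hip/hip_runtime.h>

__constant__ double c_Coeff[3];

int main(void)
{
    const double h_Coeff[3] = { -3.0/32.0, +30.0/32.0, +5.0/32.0 };
    // offset = 0 and kind = hipMemcpyHostToDevice are the default arguments,
    // so they are omitted here just as in the file above
    hipMemcpyToSymbol(c_Coeff, h_Coeff, 3*sizeof(double));
    return 0;
}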
1f34879d53b32ace0ded6bc3884098c083be0b16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_reset_field_kernel1; int xdim0_reset_field_kernel1_h = -1; __constant__ int ydim0_reset_field_kernel1; int ydim0_reset_field_kernel1_h = -1; __constant__ int xdim1_reset_field_kernel1; int xdim1_reset_field_kernel1_h = -1; __constant__ int ydim1_reset_field_kernel1; int ydim1_reset_field_kernel1_h = -1; __constant__ int xdim2_reset_field_kernel1; int xdim2_reset_field_kernel1_h = -1; __constant__ int ydim2_reset_field_kernel1; int ydim2_reset_field_kernel1_h = -1; __constant__ int xdim3_reset_field_kernel1; int xdim3_reset_field_kernel1_h = -1; __constant__ int ydim3_reset_field_kernel1; int ydim3_reset_field_kernel1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_reset_field_kernel1 * (y) + \ xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_reset_field_kernel1 * (y) + \ xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_reset_field_kernel1 * (y) + \ xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_reset_field_kernel1 * (y) + \ xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1 * (z)) // user function __device__ void reset_field_kernel1(double *density0, const double *density1, double *energy0, const double *energy1) { density0[OPS_ACC0(0, 0, 0)] = density1[OPS_ACC1(0, 0, 0)]; energy0[OPS_ACC2(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_reset_field_kernel1(double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, const double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel1 + idx_z * 1 * 1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel1 + idx_z * 1 * 1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel1 + idx_z * 1 * 1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel1 + idx_z * 1 * 1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { reset_field_kernel1(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 4, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "reset_field_kernel1"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && 
range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) { hipMemcpyToSymbol(xdim0_reset_field_kernel1, &xdim0, sizeof(int)); xdim0_reset_field_kernel1_h = xdim0; hipMemcpyToSymbol(ydim0_reset_field_kernel1, &ydim0, sizeof(int)); ydim0_reset_field_kernel1_h = ydim0; hipMemcpyToSymbol(xdim1_reset_field_kernel1, &xdim1, sizeof(int)); xdim1_reset_field_kernel1_h = xdim1; hipMemcpyToSymbol(ydim1_reset_field_kernel1, &ydim1, sizeof(int)); ydim1_reset_field_kernel1_h = ydim1; hipMemcpyToSymbol(xdim2_reset_field_kernel1, &xdim2, sizeof(int)); xdim2_reset_field_kernel1_h = xdim2; hipMemcpyToSymbol(ydim2_reset_field_kernel1, &ydim2, sizeof(int)); ydim2_reset_field_kernel1_h = ydim2; hipMemcpyToSymbol(xdim3_reset_field_kernel1, &xdim3, sizeof(int)); xdim3_reset_field_kernel1_h = xdim3; hipMemcpyToSymbol(ydim3_reset_field_kernel1, &ydim3, sizeof(int)); ydim3_reset_field_kernel1_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < 
dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_reset_field_kernel1), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[2], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3); } }
1f34879d53b32ace0ded6bc3884098c083be0b16.cu
// // auto-generated by ops.py // __constant__ int xdim0_reset_field_kernel1; int xdim0_reset_field_kernel1_h = -1; __constant__ int ydim0_reset_field_kernel1; int ydim0_reset_field_kernel1_h = -1; __constant__ int xdim1_reset_field_kernel1; int xdim1_reset_field_kernel1_h = -1; __constant__ int ydim1_reset_field_kernel1; int ydim1_reset_field_kernel1_h = -1; __constant__ int xdim2_reset_field_kernel1; int xdim2_reset_field_kernel1_h = -1; __constant__ int ydim2_reset_field_kernel1; int ydim2_reset_field_kernel1_h = -1; __constant__ int xdim3_reset_field_kernel1; int xdim3_reset_field_kernel1_h = -1; __constant__ int ydim3_reset_field_kernel1; int ydim3_reset_field_kernel1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_reset_field_kernel1 * (y) + \ xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_reset_field_kernel1 * (y) + \ xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_reset_field_kernel1 * (y) + \ xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_reset_field_kernel1 * (y) + \ xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1 * (z)) // user function __device__ void reset_field_kernel1(double *density0, const double *density1, double *energy0, const double *energy1) { density0[OPS_ACC0(0, 0, 0)] = density1[OPS_ACC1(0, 0, 0)]; energy0[OPS_ACC2(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_reset_field_kernel1(double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, const double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel1 + idx_z * 1 * 1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel1 + idx_z * 1 * 1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel1 + idx_z * 1 * 1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel1 + idx_z * 1 * 1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { reset_field_kernel1(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 4, range, 1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1, "reset_field_kernel1"); OPS_kernels[1].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = 
range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) { cudaMemcpyToSymbol(xdim0_reset_field_kernel1, &xdim0, sizeof(int)); xdim0_reset_field_kernel1_h = xdim0; cudaMemcpyToSymbol(ydim0_reset_field_kernel1, &ydim0, sizeof(int)); ydim0_reset_field_kernel1_h = ydim0; cudaMemcpyToSymbol(xdim1_reset_field_kernel1, &xdim1, sizeof(int)); xdim1_reset_field_kernel1_h = xdim1; cudaMemcpyToSymbol(ydim1_reset_field_kernel1, &ydim1, sizeof(int)); ydim1_reset_field_kernel1_h = ydim1; cudaMemcpyToSymbol(xdim2_reset_field_kernel1, &xdim2, sizeof(int)); xdim2_reset_field_kernel1_h = xdim2; cudaMemcpyToSymbol(ydim2_reset_field_kernel1, &ydim2, sizeof(int)); ydim2_reset_field_kernel1_h = ydim2; cudaMemcpyToSymbol(xdim3_reset_field_kernel1, &xdim3, sizeof(int)); xdim3_reset_field_kernel1_h = xdim3; cudaMemcpyToSymbol(ydim3_reset_field_kernel1, &ydim3, sizeof(int)); ydim3_reset_field_kernel1_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + 
OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_reset_field_kernel1<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[1].time += t1 - t2; } ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[2], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[1].mpi_time += t2 - t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3); } }
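The .hip and .cu versions of this generated OPS host stub differ only in the runtime API spellings: cudaMemcpyToSymbol becomes hipMemcpyToSymbol, cudaDeviceSynchronize becomes hipDeviceSynchronize, and the <<<grid, tblock>>> launch becomes hipLaunchKernelGGL with explicit shared-memory and stream arguments. The stand-alone CUDA sketch below condenses that mapping; the kernel and symbol names in it are invented for illustration and do not come from the files above.

// Hedged sketch (not part of the OPS-generated pair above): constant-symbol copy
// plus kernel launch, with the HIP forms that hipify would emit shown in comments.
#include <cstdio>
#include <cuda_runtime.h>

__constant__ int xdim_demo;   // plays the role of xdim0_reset_field_kernel1 (illustrative name)

__global__ void copy_row(double *dst, const double *src, int size0) {
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx_x < size0)
    dst[idx_x] = src[idx_x % xdim_demo];   // trivial use of the constant-memory dimension
}

int main() {
  const int n = 256, xdim = 16;
  double *d_src, *d_dst;
  cudaMalloc(&d_src, n * sizeof(double));
  cudaMalloc(&d_dst, n * sizeof(double));
  cudaMemset(d_src, 0, n * sizeof(double));

  // CUDA side; hipify rewrites this to hipMemcpyToSymbol(xdim_demo, &xdim, sizeof(int)).
  cudaMemcpyToSymbol(xdim_demo, &xdim, sizeof(int));

  dim3 grid((n - 1) / 64 + 1), block(64);
  // CUDA side; hipify rewrites this to
  // hipLaunchKernelGGL(copy_row, grid, block, 0, 0, d_dst, d_src, n);
  copy_row<<<grid, block>>>(d_dst, d_src, n);
  cudaDeviceSynchronize();   // hipDeviceSynchronize() on the HIP side

  printf("launch done: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}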
e16102ca3b68cbf4a999ff18ba374dd18576cffe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "itf/engine/common.hpp" #include "itf/engine/util/math_functions.hpp" namespace itf { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Engine::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Engine::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Engine::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Engine::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Engine::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Engine::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Engine::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Engine::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Engine::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Engine::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Engine::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Engine::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Engine::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Engine::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Engine::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Engine::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const 
double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Engine::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Engine::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Engine::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Engine::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Engine::curand_generator(), r, n, mu, sigma)); } } // namespace itf
e16102ca3b68cbf4a999ff18ba374dd18576cffe.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "itf/engine/common.hpp" #include "itf/engine/util/math_functions.hpp" namespace itf { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Engine::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Engine::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Engine::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Engine::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Engine::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Engine::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Engine::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Engine::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Engine::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Engine::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Engine::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Engine::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Engine::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Engine::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Engine::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Engine::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void 
caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), 
CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Engine::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Engine::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Engine::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Engine::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Engine::curand_generator(), r, n, mu, sigma)); } } // namespace itf
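Every element-wise kernel in this pair (add, sub, mul, div, abs, exp, powx, ...) iterates with CUDA_KERNEL_LOOP(index, n) and is launched over CAFFE_GET_BLOCKS(N) blocks of CAFFE_CUDA_NUM_THREADS threads. The stand-alone CUDA sketch below spells out the grid-stride loop that macro is generally understood to expand to; the explicit expansion and the 512-thread block size are assumptions about Caffe's headers rather than something taken from the files above.

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride loop written out by hand; in the files above this pattern is hidden
// behind the CUDA_KERNEL_LOOP(index, n) macro (assumed to expand to roughly this form).
__global__ void add_kernel_sketch(const int n, const float* a, const float* b, float* y) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
       index += blockDim.x * gridDim.x) {
    y[index] = a[index] + b[index];
  }
}

int main() {
  const int n = 1 << 20;
  float *a, *b, *y;                                // unified memory keeps the sketch short
  cudaMallocManaged(&a, n * sizeof(float));
  cudaMallocManaged(&b, n * sizeof(float));
  cudaMallocManaged(&y, n * sizeof(float));
  for (int i = 0; i < n; ++i) { a[i] = 1.0f; b[i] = 2.0f; }

  const int threads = 512;                         // stand-in for CAFFE_CUDA_NUM_THREADS (assumed value)
  const int blocks = (n + threads - 1) / threads;  // stand-in for CAFFE_GET_BLOCKS(n)
  add_kernel_sketch<<<blocks, threads>>>(n, a, b, y);
  cudaDeviceSynchronize();

  printf("y[0] = %f (expect 3.0)\n", y[0]);
  cudaFree(a); cudaFree(b); cudaFree(y);
  return 0;
}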
c92370f18b4be5bbfac1ed50c9fbbf4a27882d69.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "common.h"

#define NUM_THREADS 256

extern double size;

//
//  benchmarking program
//

__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
}

__global__ void compute_forces_gpu(particle_t * particles, particle_t** d_bins, int* d_npbins, int n, double bin_dim, int n_bins, int bin_size)
{
    // Get thread (particle) ID
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid >= n) return;
}

__global__ void move_gpu (particle_t * particles, int n, double size)
{
    // Get thread (particle) ID
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid >= n) return;
}

__global__ void create_bins_gpu (particle_t* particles, particle_t** d_bins, int* d_npbins, int n, double size, double bin_dim, int n_bins, int bin_size)
{
}

int main( int argc, char **argv )
{
    // This takes a few seconds to initialize the runtime
    hipDeviceSynchronize();

    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
        return 0;
    }

    int n = read_int( argc, argv, "-n", 1000 );
    char *savename = read_string( argc, argv, "-o", NULL );
    if(!savename) savename = "out.txt";

    FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );

    set_size( n );
    double size = get_size();

    init_particles( n, particles );

    // GPU particle data structure
    particle_t * d_particles;
    hipMalloc((void **) &d_particles, n * sizeof(particle_t));

    //Setting up variables for binning
    double bin_dim = cutoff*1;
    int n_bins = size/bin_dim +1;

    //Bin data structure - Hold pointer to particles in that bin - Efficient?
    //Each bin can contain twice the size of max_particles - vary after profiling
    int bin_size = 2;
    particle_t* *d_bins;
    hipMalloc((void **) &d_bins, n_bins*n_bins*bin_size*sizeof(particle_t*));

    //Holds the number of particles in each bin
    int* d_npbins;
    hipMalloc((void **) &d_npbins, n_bins*n_bins*sizeof(int));

    hipDeviceSynchronize();
    double copy_time = read_timer( );

    // Copy the particles to the GPU
    hipMemcpy(d_particles, particles, n * sizeof(particle_t), hipMemcpyHostToDevice);

    hipDeviceSynchronize();
    copy_time = read_timer( ) - copy_time;

    //
    //  simulate a number of time steps
    //
    hipDeviceSynchronize();
    double simulation_time = read_timer( );

    for( int step = 0; step < NSTEPS; step++ )
    {
        int blks = (n + NUM_THREADS - 1) / NUM_THREADS;

        //
        // Initialise the particle count array and do binning
        //
        hipMemset(d_npbins, 0, n_bins*n_bins*sizeof(int));
        hipLaunchKernelGGL(( create_bins_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, d_bins, d_npbins, n, size, bin_dim, n_bins, bin_size);

        //
        //  compute forces
        //
        hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, d_bins, d_npbins, n, bin_dim, n_bins, bin_size);

        //
        //  move particles
        //
        hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size);

        // Moving this down so that the file dump time is not included in execution time
        //  save if necessary
        //
        //if( fsave && (step%SAVEFREQ) == 0 ) {
        //    // Copy the particles back to the CPU
        //    hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost);
        //    save( fsave, n, particles);
        //}
    }
    hipDeviceSynchronize();
    simulation_time = read_timer( ) - simulation_time;

    hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost);
    save( fsave, n, particles);

    printf( "CPU-GPU copy time = %g seconds\n", copy_time);
    printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );

    free( particles );
    hipFree(d_particles);
    if( fsave )
        fclose( fsave );

    return 0;
}
c92370f18b4be5bbfac1ed50c9fbbf4a27882d69.cu
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
#include "common.h"

#define NUM_THREADS 256

extern double size;

//
//  benchmarking program
//

__device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor)
{
}

__global__ void compute_forces_gpu(particle_t * particles, particle_t** d_bins, int* d_npbins, int n, double bin_dim, int n_bins, int bin_size)
{
    // Get thread (particle) ID
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid >= n) return;
}

__global__ void move_gpu (particle_t * particles, int n, double size)
{
    // Get thread (particle) ID
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid >= n) return;
}

__global__ void create_bins_gpu (particle_t* particles, particle_t** d_bins, int* d_npbins, int n, double size, double bin_dim, int n_bins, int bin_size)
{
}

int main( int argc, char **argv )
{
    // This takes a few seconds to initialize the runtime
    cudaThreadSynchronize();

    if( find_option( argc, argv, "-h" ) >= 0 )
    {
        printf( "Options:\n" );
        printf( "-h to see this help\n" );
        printf( "-n <int> to set the number of particles\n" );
        printf( "-o <filename> to specify the output file name\n" );
        return 0;
    }

    int n = read_int( argc, argv, "-n", 1000 );
    char *savename = read_string( argc, argv, "-o", NULL );
    if(!savename) savename = "out.txt";

    FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
    particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );

    set_size( n );
    double size = get_size();

    init_particles( n, particles );

    // GPU particle data structure
    particle_t * d_particles;
    cudaMalloc((void **) &d_particles, n * sizeof(particle_t));

    //Setting up variables for binning
    double bin_dim = cutoff*1;
    int n_bins = size/bin_dim +1;

    //Bin data structure - Hold pointer to particles in that bin - Efficient?
    //Each bin can contain twice the size of max_particles - vary after profiling
    int bin_size = 2;
    particle_t* *d_bins;
    cudaMalloc((void **) &d_bins, n_bins*n_bins*bin_size*sizeof(particle_t*));

    //Holds the number of particles in each bin
    int* d_npbins;
    cudaMalloc((void **) &d_npbins, n_bins*n_bins*sizeof(int));

    cudaThreadSynchronize();
    double copy_time = read_timer( );

    // Copy the particles to the GPU
    cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice);

    cudaThreadSynchronize();
    copy_time = read_timer( ) - copy_time;

    //
    //  simulate a number of time steps
    //
    cudaThreadSynchronize();
    double simulation_time = read_timer( );

    for( int step = 0; step < NSTEPS; step++ )
    {
        int blks = (n + NUM_THREADS - 1) / NUM_THREADS;

        //
        // Initialise the particle count array and do binning
        //
        cudaMemset(d_npbins, 0, n_bins*n_bins*sizeof(int));
        create_bins_gpu <<< blks, NUM_THREADS >>> (d_particles, d_bins, d_npbins, n, size, bin_dim, n_bins, bin_size);

        //
        //  compute forces
        //
        compute_forces_gpu <<< blks, NUM_THREADS >>> (d_particles, d_bins, d_npbins, n, bin_dim, n_bins, bin_size);

        //
        //  move particles
        //
        move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size);

        // Moving this down so that the file dump time is not included in execution time
        //  save if necessary
        //
        //if( fsave && (step%SAVEFREQ) == 0 ) {
        //    // Copy the particles back to the CPU
        //    cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
        //    save( fsave, n, particles);
        //}
    }
    cudaThreadSynchronize();
    simulation_time = read_timer( ) - simulation_time;

    cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
    save( fsave, n, particles);

    printf( "CPU-GPU copy time = %g seconds\n", copy_time);
    printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );

    free( particles );
    cudaFree(d_particles);
    if( fsave )
        fclose( fsave );

    return 0;
}
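In both versions of this benchmark the kernels compute_forces_gpu, move_gpu and create_bins_gpu are empty stubs, while main already allocates d_bins (n_bins * n_bins * bin_size particle pointers) and d_npbins (one counter per bin). The CUDA sketch below is one plausible, hypothetical body for the binning step that matches that layout; the minimal particle_t it defines is a stand-in for the real struct in common.h.

#include <cstdio>
#include <cuda_runtime.h>

// Minimal stand-in for the particle_t defined in common.h (assumption).
struct particle_t { double x, y, vx, vy, ax, ay; };

// One plausible body for the empty create_bins_gpu stub above: each thread maps its
// particle's (x, y) position to a bin and atomically claims a slot in that bin,
// matching the d_bins / d_npbins layout allocated in main.
__global__ void create_bins_sketch(particle_t* particles, particle_t** d_bins,
                                   int* d_npbins, int n, double bin_dim,
                                   int n_bins, int bin_size) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= n) return;

  int bx = (int)(particles[tid].x / bin_dim);
  int by = (int)(particles[tid].y / bin_dim);
  int bin = by * n_bins + bx;

  int slot = atomicAdd(&d_npbins[bin], 1);   // claim the next free slot in this bin
  if (slot < bin_size)                       // this sketch silently drops overflow
    d_bins[bin * bin_size + slot] = &particles[tid];
}

int main() {
  const int n = 1024, n_bins = 8, bin_size = 2;
  const double bin_dim = 0.125;

  particle_t* d_particles;
  particle_t** d_bins;
  int* d_npbins;
  cudaMalloc(&d_particles, n * sizeof(particle_t));
  cudaMalloc(&d_bins, n_bins * n_bins * bin_size * sizeof(particle_t*));
  cudaMalloc(&d_npbins, n_bins * n_bins * sizeof(int));
  cudaMemset(d_particles, 0, n * sizeof(particle_t));
  cudaMemset(d_npbins, 0, n_bins * n_bins * sizeof(int));

  create_bins_sketch<<<(n + 255) / 256, 256>>>(d_particles, d_bins, d_npbins,
                                               n, bin_dim, n_bins, bin_size);
  cudaDeviceSynchronize();
  printf("binning sketch done: %s\n", cudaGetErrorString(cudaGetLastError()));

  cudaFree(d_particles); cudaFree(d_bins); cudaFree(d_npbins);
  return 0;
}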
1dba268fb86fc6eefe3f522bd53c5c1f8f8148ea.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * unit_tests.cu
 *
 *  Created on: Jul 12, 2013
 *      Author: brandonkelly
 */

// standard includes
#include <iostream>
#include <time.h>

// local includes
#include "UnitTests.cuh"

/* list of unit tests:
 *
 * - test rank-1 cholesky update
 * - make sure Chi::Propose follows a multivariate normal distribution
 * - make sure Chi::Accept always accepts when the proposal and the current values are the same
 * - make sure we accept and save a Chi value with a much higher posterior
 * - Test Chi::Adapt acceptance rate and covariance by running a simple MCMC sampler
 * - make sure PopulationPar::Propose follow a multivariate normal distribution
 * - make sure that PopulationPar::Accept always accepts when the logdensities are the same
 * - make sure PopulationPar::Update always accepts when the proposed and current theta values are the same
 * - make sure we accept and save a PopulationPar value with the posterior is much higher
 * - Test PopulationPar::Adapt acceptance rate and covariance by running a simple MCMC sampler
 * - Test DataAugmentation::GetChi
 * - make sure DataAugmentation::Update always accepts when the proposed and current chi values are the same
 * - make sure we accept and save a Chi value when the posterior is much higher
 * - make sure we get the correct acceptance rate and covariance of the characteristics by keeping the population
 *   parameter fixed
 *
 */

int main(int argc, char** argv)
{
    int ndata = 100000;

    bool check_memory = false; // set to true if you want to check how much memory is available without running the tests
    if (check_memory) {
        size_t free, total;
        hipMemGetInfo(&free, &total);
        std::cout << "free: " << free / 1024 << ", total: " << total / 1024 << std::endl;
    }

    // Cuda grid launch
    dim3 nThreads(256);
    dim3 nBlocks((ndata + nThreads.x-1) / nThreads.x);
    printf("nBlocks: %d\n", nBlocks.x); // no more than 64k blocks!
    if (nBlocks.x > 65535) {
        std::cerr << "ERROR: Block is too large" << std::endl;
        return 2;
    }

    rng.seed(123456); // keep the host-side seed constant to make the unit tests reproducible

    {
        /*
         * RUN THE UNIT TESTS
         */
        UnitTests Tests(ndata, nBlocks, nThreads);

        bool save_meas = false;
        if (save_meas) {
            Tests.SaveMeasurements();
        }

        // test the rank-1 cholesky update
        Tests.R1CholUpdate();

        // test that pointers are correctly set
        Tests.GibbsSamplerPtr();

        // tests for the characteristic class
        Tests.ChiPropose();
        Tests.ChiAcceptSame();
        Tests.ChiAdapt();

        // tests for population parameter class
        Tests.ThetaPropose();
        Tests.ThetaAcceptSame();
        Tests.ThetaAdapt();

        // tests for device-side functions used in updated the characteristics
        Tests.DevicePropose();
        Tests.DeviceAccept();
        Tests.DeviceAdapt();

        // tests for the data augmentation class
        Tests.DaugGetChi();
        Tests.DaugLogDensPtr();
        Tests.DaugAcceptSame();
        Tests.DaugAcceptBetter();
        //
        //
        // tests for the MCMC sampler
        Tests.FixedChar();
        Tests.FixedPopPar();

        time_t timer1, timer2;
        time(&timer1);
        Tests.NormNorm();
        time(&timer2);
        double seconds = difftime(timer2, timer1);
        std::cout << "MCMC sampler for normal-normal model took " << seconds << " seconds." << std::endl;

        // print results
        Tests.Finish();
    }

    if (check_memory) {
        size_t free, total;
        CUDA_CHECK_RETURN(hipDeviceReset());
        hipMemGetInfo(&free, &total);
        std::cout << "free: " << free / 1024 << ", total: " << total / 1024 << std::endl;
    }
}
1dba268fb86fc6eefe3f522bd53c5c1f8f8148ea.cu
/*
 * unit_tests.cu
 *
 *  Created on: Jul 12, 2013
 *      Author: brandonkelly
 */

// standard includes
#include <iostream>
#include <time.h>

// local includes
#include "UnitTests.cuh"

/* list of unit tests:
 *
 * - test rank-1 cholesky update
 * - make sure Chi::Propose follows a multivariate normal distribution
 * - make sure Chi::Accept always accepts when the proposal and the current values are the same
 * - make sure we accept and save a Chi value with a much higher posterior
 * - Test Chi::Adapt acceptance rate and covariance by running a simple MCMC sampler
 * - make sure PopulationPar::Propose follow a multivariate normal distribution
 * - make sure that PopulationPar::Accept always accepts when the logdensities are the same
 * - make sure PopulationPar::Update always accepts when the proposed and current theta values are the same
 * - make sure we accept and save a PopulationPar value with the posterior is much higher
 * - Test PopulationPar::Adapt acceptance rate and covariance by running a simple MCMC sampler
 * - Test DataAugmentation::GetChi
 * - make sure DataAugmentation::Update always accepts when the proposed and current chi values are the same
 * - make sure we accept and save a Chi value when the posterior is much higher
 * - make sure we get the correct acceptance rate and covariance of the characteristics by keeping the population
 *   parameter fixed
 *
 */

int main(int argc, char** argv)
{
    int ndata = 100000;

    bool check_memory = false; // set to true if you want to check how much memory is available without running the tests
    if (check_memory) {
        size_t free, total;
        cudaMemGetInfo(&free, &total);
        std::cout << "free: " << free / 1024 << ", total: " << total / 1024 << std::endl;
    }

    // Cuda grid launch
    dim3 nThreads(256);
    dim3 nBlocks((ndata + nThreads.x-1) / nThreads.x);
    printf("nBlocks: %d\n", nBlocks.x); // no more than 64k blocks!
    if (nBlocks.x > 65535) {
        std::cerr << "ERROR: Block is too large" << std::endl;
        return 2;
    }

    rng.seed(123456); // keep the host-side seed constant to make the unit tests reproducible

    {
        /*
         * RUN THE UNIT TESTS
         */
        UnitTests Tests(ndata, nBlocks, nThreads);

        bool save_meas = false;
        if (save_meas) {
            Tests.SaveMeasurements();
        }

        // test the rank-1 cholesky update
        Tests.R1CholUpdate();

        // test that pointers are correctly set
        Tests.GibbsSamplerPtr();

        // tests for the characteristic class
        Tests.ChiPropose();
        Tests.ChiAcceptSame();
        Tests.ChiAdapt();

        // tests for population parameter class
        Tests.ThetaPropose();
        Tests.ThetaAcceptSame();
        Tests.ThetaAdapt();

        // tests for device-side functions used in updated the characteristics
        Tests.DevicePropose();
        Tests.DeviceAccept();
        Tests.DeviceAdapt();

        // tests for the data augmentation class
        Tests.DaugGetChi();
        Tests.DaugLogDensPtr();
        Tests.DaugAcceptSame();
        Tests.DaugAcceptBetter();
        //
        //
        // tests for the MCMC sampler
        Tests.FixedChar();
        Tests.FixedPopPar();

        time_t timer1, timer2;
        time(&timer1);
        Tests.NormNorm();
        time(&timer2);
        double seconds = difftime(timer2, timer1);
        std::cout << "MCMC sampler for normal-normal model took " << seconds << " seconds." << std::endl;

        // print results
        Tests.Finish();
    }

    if (check_memory) {
        size_t free, total;
        CUDA_CHECK_RETURN(cudaDeviceReset());
        cudaMemGetInfo(&free, &total);
        std::cout << "free: " << free / 1024 << ", total: " << total / 1024 << std::endl;
    }
}
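This test driver hard-codes two device properties: it prints free and total memory through cudaMemGetInfo (hipMemGetInfo after hipify) and rejects grids larger than 65535 blocks, the 1-D grid limit of older GPUs. The stand-alone CUDA sketch below queries both values at run time instead; it is illustrative only and not part of the test driver.

#include <cstdio>
#include <cuda_runtime.h>

// Queries free/total device memory and the device's actual 1-D grid limit instead of
// relying on the fixed 65535 bound checked in the driver above.
int main() {
  size_t free_bytes = 0, total_bytes = 0;
  cudaMemGetInfo(&free_bytes, &total_bytes);
  printf("free: %zu KiB, total: %zu KiB\n", free_bytes / 1024, total_bytes / 1024);

  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);

  const int ndata = 100000;
  dim3 nThreads(256);
  dim3 nBlocks((ndata + nThreads.x - 1) / nThreads.x);
  if (nBlocks.x > (unsigned)prop.maxGridSize[0]) {
    fprintf(stderr, "ERROR: grid of %u blocks exceeds device limit %d\n",
            nBlocks.x, prop.maxGridSize[0]);
    return 2;
  }
  printf("nBlocks: %u (device limit %d)\n", nBlocks.x, prop.maxGridSize[0]);
  return 0;
}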
483d2e6cdd3a97d69544e8154c59871cf2ebbde8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _MSC_VER #define _USE_MATH_DEFINES // For M_PI #endif // _MSC_VER #include <cmath> #include "caffe2/operators/roi_align_rotated_gradient_op.h" #include <stdio.h> #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignRotatedBackward( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois, bool continuous_coordinate) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not round T roi_offset = continuous_coordinate ? T(0.5) : 0; T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset; T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; if (!continuous_coordinate) { // backward compatiblity // Force malformed ROIs to be 1x1 roi_width = c10::hip::compat::max(roi_width, (T)1.); roi_height = c10::hip::compat::max(roi_height, (T)1.); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. 
T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignRotatedBackward } // namespace template <> bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output( 0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to // "forward" op (aka "gradInput") // Must zero-out dX before accumulating gradients math::Set<float, CUDAContext>( dX->numel(), 0.f, dX->mutable_data<float>(), &context_); if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois hipLaunchKernelGGL(( RoIAlignRotatedBackward<float>) , dim3(CAFFE_GET_BLOCKS(dY.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.numel(), dY.data<float>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, sampling_ratio_, dX->mutable_data<float>(), R.data<float>(), aligned_); } return true; } REGISTER_CUDA_OPERATOR( RoIAlignRotatedGradient, RoIAlignRotatedGradientOp<float, CUDAContext>); } // namespace caffe2
483d2e6cdd3a97d69544e8154c59871cf2ebbde8.cu
#ifdef _MSC_VER #define _USE_MATH_DEFINES // For M_PI #endif // _MSC_VER #include <cmath> #include "caffe2/operators/roi_align_rotated_gradient_op.h" #include <stdio.h> #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignRotatedBackward( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois, bool continuous_coordinate) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not round T roi_offset = continuous_coordinate ? T(0.5) : 0; T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset; T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; if (!continuous_coordinate) { // backward compatiblity // Force malformed ROIs to be 1x1 roi_width = c10::cuda::compat::max(roi_width, (T)1.); roi_height = c10::cuda::compat::max(roi_height, (T)1.); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. 
T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignRotatedBackward } // namespace template <> bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output( 0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to // "forward" op (aka "gradInput") // Must zero-out dX before accumulating gradients math::Set<float, CUDAContext>( dX->numel(), 0.f, dX->mutable_data<float>(), &context_); if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois RoIAlignRotatedBackward<float> <<<CAFFE_GET_BLOCKS(dY.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.numel(), dY.data<float>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, sampling_ratio_, dX->mutable_data<float>(), R.data<float>(), aligned_); } return true; } REGISTER_CUDA_OPERATOR( RoIAlignRotatedGradient, RoIAlignRotatedGradientOp<float, CUDAContext>); } // namespace caffe2
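Both versions of the backward kernel above recover (n, c, ph, pw) from a flat output index before scattering gradients with gpu_atomic_add. A minimal standalone sketch of that index decomposition follows; the helper name and the test shape are illustrative assumptions, not part of the operator.

#include <cstdio>

// Hypothetical helper mirroring the index math used in RoIAlignRotatedBackward:
// index = ((n * channels + c) * pooled_height + ph) * pooled_width + pw.
__host__ __device__ inline void decompose_index(int index,
                                                int channels,
                                                int pooled_height,
                                                int pooled_width,
                                                int* n, int* c, int* ph, int* pw) {
  *pw = index % pooled_width;
  *ph = (index / pooled_width) % pooled_height;
  *c  = (index / pooled_width / pooled_height) % channels;
  *n  = index / pooled_width / pooled_height / channels;
}

int main() {
  // Illustrative shape only: channels = 3, pooled output 4 x 5.
  int n, c, ph, pw;
  decompose_index(37, 3, 4, 5, &n, &c, &ph, &pw);
  std::printf("n=%d c=%d ph=%d pw=%d\n", n, c, ph, pw);  // prints n=0 c=1 ph=3 pw=2
  return 0;
}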
ac78e04206d7805eee1bb53fcdd057d6820f9a28.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__constant__ float *c_Kernel;

__global__ void average(float *d_ip_v, float *d_ip_ir, int app_len)
{
    const int X = blockIdx.x * blockDim.x + threadIdx.x;
    if (X < app_len)
    {
        d_ip_v[X] = (d_ip_v[X] + d_ip_ir[X]) / 2;
    }
}
ac78e04206d7805eee1bb53fcdd057d6820f9a28.cu
#include "includes.h" __constant__ float *c_Kernel; __global__ void average(float *d_ip_v, float *d_ip_ir, int app_len) { const int X = blockIdx.x * blockDim.x + threadIdx.x; if (X < app_len) { d_ip_v[X] = (d_ip_v[X] + d_ip_ir[X]) / 2; } }
eda964416c5b8df896083e3ba695469c5d60818a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License   See LICENSE.txt for the full license.

#include "cuda_utils.h"
#include "cuda_dlib.h"

// ------------------------------------------------------------------------------------

__global__ void cuda_add_arrays(const float* a, const float* b, float* out, size_t n)
{
    out[0] += a[0]+b[0];
}

void add_arrays()
{
    hipLaunchKernelGGL((cuda_add_arrays), dim3(512), dim3(512), 0, 0, 0, 0, 0, 0);
}

// ------------------------------------------------------------------------------------
eda964416c5b8df896083e3ba695469c5d60818a.cu
// Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License   See LICENSE.txt for the full license.

#include "cuda_utils.h"
#include "cuda_dlib.h"

// ------------------------------------------------------------------------------------

__global__ void cuda_add_arrays(const float* a, const float* b, float* out, size_t n)
{
    out[0] += a[0]+b[0];
}

void add_arrays()
{
    cuda_add_arrays<<<512,512>>>(0,0,0,0);
}

// ------------------------------------------------------------------------------------
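The only substantive difference between the two files above is the kernel-launch syntax that hipify rewrites. The sketch below shows the same launch in both notations on a placeholder kernel (the HIP form appears in a comment so the file stays plain CUDA); the kernel name, managed allocations, and test values are illustrative assumptions.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel standing in for cuda_add_arrays; names are illustrative.
__global__ void my_kernel(const float* a, const float* b, float* out, size_t n) {
  if (n > 0) out[0] = a[0] + b[0];
}

int main() {
  float *a, *b, *out;
  cudaMallocManaged(&a, sizeof(float));
  cudaMallocManaged(&b, sizeof(float));
  cudaMallocManaged(&out, sizeof(float));
  *a = 1.0f; *b = 2.0f; *out = 0.0f;

  // CUDA triple-chevron launch: <<<grid, block, dynamicSharedBytes, stream>>>.
  my_kernel<<<dim3(1), dim3(1), 0, 0>>>(a, b, out, 1);
  // The hipified file expresses the same launch as
  //   hipLaunchKernelGGL(my_kernel, dim3(1), dim3(1), 0, 0, a, b, out, 1);
  // i.e. kernel, grid, block, shared-memory bytes, stream, then the kernel arguments.

  cudaDeviceSynchronize();
  std::printf("out[0] = %f\n", *out);
  cudaFree(a); cudaFree(b); cudaFree(out);
  return 0;
}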
5da904d4834937f66395a7dc508c0349fb4cecd4.hip
// !!! This is a file automatically generated by hipify!!!
# include "taylor_shift.cu"
# include "taylor_shift_cpu.cu"
# include "taylor_shift_conf.h"
# include "taylor_shift_kernel.cu"


int main(int argc, char* argv[])
{
  // temporal data
  float total_time;
  hipEvent_t start, stop; /* Initial and final time */

  // TIME
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);

  // declaration of variables
  int n, e, p;
  double pinv;

  error_message(argc);
  p = atoi(argv[2]);
  pinv = (double) (1<<30)/p; // 1/p;
  n = size_file(argv[1]);
  e = (int) log2((double) n);

  taylor_shift_GPU(n, e, argv[1], p, pinv);

  // TIME
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&total_time, start, stop);
  hipEventDestroy(stop);
  total_time /= 1000.0;
  printf(" * total_time = %.6f s\n\n", total_time);

  return 0;
}
5da904d4834937f66395a7dc508c0349fb4cecd4.cu
# include "taylor_shift.cu" # include "taylor_shift_cpu.cu" # include "taylor_shift_conf.h" # include "taylor_shift_kernel.cu" int main(int argc, char* argv[]) { // temporal data float total_time; cudaEvent_t start, stop; /* Initial and final time */ // TIME cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // declaration of variables int n, e, p; double pinv; error_message(argc); p = atoi(argv[2]); pinv = (double) (1<<30)/p; // 1/p; n = size_file(argv[1]); e = (int) log2((double) n); taylor_shift_GPU(n, e, argv[1], p, pinv); // TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); cudaEventDestroy(stop); total_time /= 1000.0; printf(" * total_time = %.6f s\n\n", total_time); return 0; }
6e9807a11baa4c352824761433fadcd815b794cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = sd::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = sd::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * sd::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z sum = 0.0f; const X *inSlice = x + (n * strideB + c * strideC); 
for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]); int divide_factor = pool_size; //Case 0: exclude padding if (extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void avgPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( avgPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = sd::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH); hstart += f * dH; } if (wstart < 0) { int f = sd::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW); wstart += f * dW; } if (hend > iH) { int f = sd::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH); hend -= f * dH; } if (wend > iW) { int f = sd::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double, int>((double) (hend - hstart) / (double) dH) * sd::math::nd4j_ceil<double, int>((double) (wend - wstart) / (double) dW); Z sum = 0.f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += sd::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(sd::math::nd4j_abs<X>(inSlice[h * strideY + w * 
strideX])), extraParam0); z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sd::math::nd4j_pow<Z, Z, Z>(sum, (Z) 1.0f / extraParam0); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void pnormPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( pnormPooling2dCuda<X, Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = sd::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = sd::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * sd::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z max = -sd::DataTypeUtils::max<Z>(); const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]); if (v > max) max = v; } } z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max; } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void 
maxPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { hipLaunchKernelGGL(( maxPooling2dCuda<X,Z>), dim3(512), dim3(512), 4192, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); switch (poolingMode) { case MAX_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), NUMERIC_TYPES); } break; case AVG_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), NUMERIC_TYPES); } break; case PNORM_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; default: throw std::runtime_error("Pooling2D: Unknown PoolingType used"); } output.tickWriteDevice(); input.tickReadDevice(); auto result = hipStreamSynchronize(*block.launchContext()->getCudaStream()); if (result != 0) throw cuda_exception::build("Pooling2D failed", result); } } }
6e9807a11baa4c352824761433fadcd815b794cf.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = sd::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = sd::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * sd::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z sum = 0.0f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += 
static_cast<Z>(inSlice[h * strideY + w * strideX]); int divide_factor = pool_size; //Case 0: exclude padding if (extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void avgPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { avgPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = sd::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH); hstart += f * dH; } if (wstart < 0) { int f = sd::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW); wstart += f * dW; } if (hend > iH) { int f = sd::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH); hend -= f * dH; } if (wend > iW) { int f = sd::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double, int>((double) (hend - hstart) / (double) dH) * sd::math::nd4j_ceil<double, int>((double) (wend - wstart) / (double) dW); Z sum = 0.f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += sd::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(sd::math::nd4j_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0); z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sd::math::nd4j_pow<Z, Z, 
Z>(sum, (Z) 1.0f / extraParam0); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void pnormPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { pnormPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if(hstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH); hstart += f * dH; } if(wstart < 0){ int f = sd::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW); wstart += f * dW; } if(hend > iH){ int f = sd::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH); hend -= f * dH; } if(wend > iW){ int f = sd::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW); wend -= f * dW; } //Accounts for dilation int pool_size = sd::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * sd::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW); Z max = -sd::DataTypeUtils::max<Z>(); const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]); if (v > max) max = v; } } z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max; } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void maxPooling2dCudaLauncher(sd::LaunchContext & block, const void *vx, const Nd4jLong *vxShapeInfo, void *vz, const Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const 
int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { maxPooling2dCuda<X,Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); switch (poolingMode) { case MAX_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), NUMERIC_TYPES); } break; case AVG_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), NUMERIC_TYPES); } break; case PNORM_POOL: { BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES); } break; default: throw std::runtime_error("Pooling2D: Unknown PoolingType used"); } output.tickWriteDevice(); input.tickReadDevice(); auto result = cudaStreamSynchronize(*block.launchContext()->getCudaStream()); if (result != 0) throw cuda_exception::build("Pooling2D failed", result); } } }
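All three pooling kernels above walk the flattened output with the same grid-stride loop, so a fixed <<<512, 512>>> launch covers any output length. A minimal standalone version of that pattern follows; the kernel and buffer names are illustrative, not taken from the library.

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride loop: each thread starts at its global id and advances by the
// total number of launched threads, so any grid size covers any problem size.
__global__ void fill_kernel(float* out, int length, float value) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
    out[index] = value;
  }
}

int main() {
  const int length = 1 << 20;          // illustrative size
  float* out = nullptr;
  cudaMalloc(&out, length * sizeof(float));

  // Deliberately fewer threads than elements; the stride loop picks up the rest.
  fill_kernel<<<512, 512>>>(out, length, 1.0f);
  cudaDeviceSynchronize();

  float first = 0.0f;
  cudaMemcpy(&first, out, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("out[0] = %f\n", first);
  cudaFree(out);
  return 0;
}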
5a114877b2d732ce9d7758f9f56e5e0b678afde3.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************** * * Program used to test texture object and reference API * * a : zhonghy * date: 2018-4-24 ********************************************************/ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include <iostream> #include <string> #include <vector> #include <stdio.h> //hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); int print(float *, int, int); __global__ void transformKernel(float *, hipTextureObject_t, int, int, float); //2D float texture texture<float, hipTextureType2D, hipReadModeElementType> texRef; int print(float *mat, int width, int height) { if(!mat) { return 0; } for(int i = 0; i < width; ++i) { for(int j = 0; j < height; ++j) { std::cout << mat[j * width + i] << " "; } std::cout << std::endl; } } //Simple transformation kernel __global__ void transformKernel(float *output, hipTextureObject_t texObj, int width, int height, float theta) { //Calculate normalized texture coordinates //2D block unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; //transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; //Read from texture and write to global memory output[y * width + x] = tex2D<float>(texObj, tu, tv); } //Simple transfromation kernel __global__ void transformKernelRef(float *output, int width, int height, float theta) { //Calculate normalized texture coordinates //2D block unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; //transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; //Read from texture and write to global memory output[y * width + x] = tex2D<float>(texRef, tu, tv); } int main(int argc, char *argv[]) { /**********************texture object*********************/ ////Allocate CUDA array in device memory //int width = 256; //int height = 256; //int angle = 30; //int size = width * height; //float *h_data; //h_data = (float*)malloc(size * sizeof(float)); //for(int i = 0; i < width; ++i) //{ // for(int j = 0; j < height; ++j) // { // h_data[j * width + i] = (i + j + 2) / 2; // } //} //print(h_data, 5 ,5); //hipChannelFormatDesc channelDesc = // hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); //hipArray *cuArray; //checkCudaErrors(hipMallocArray(&cuArray, &channelDesc, width, height)); ////Copy to device memory some data located at address h_data ////in host memory //checkCudaErrors(hipMemcpyToArray(cuArray, 0, 0, h_data, // size, hipMemcpyHostToDevice)); ////Specify texture //struct hipResourceDesc resDesc; //memset(&resDesc, 0, sizeof(resDesc)); //resDesc.resType = hipResourceTypeArray; //resDesc.res.array.array = cuArray; ////Specify texture object parameters //struct hipTextureDesc texDesc; //memset(&texDesc, 0, sizeof(texDesc)); //texDesc.addressMode[0] = hipAddressModeWrap; //texDesc.addressMode[1] = hipAddressModeWrap; //texDesc.filterMode = hipFilterModeLinear; //texDesc.readMode = hipReadModeElementType; //texDesc.normalizedCoords = 1; ////Create texture object //hipTextureObject_t texObj = 0; //hipCreateTextureObject(&texObj, &resDesc, &texDesc, 
NULL); ////Allocate result of transformation in device memory //float *output; //checkCudaErrors(hipMalloc(&output, width * height * sizeof(float))); ////Invoke kernel //dim3 dimBlock(16, 16); //dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, // (height + dimBlock.y - 1) / dimBlock.y); //transformKernel<<<dimGrid, dimBlock>>>(output, // texObj, width, height, angle); ////read result from device //float *output_h; ////output_h = (float*)malloc(size * sizeof(float)); //output_h = new float[size]; //checkCudaErrors(hipMemcpy(output_h, output, size, hipMemcpyDeviceToHost)); // print(output_h, 256, 10); ////Destory texture object //hipDestroyTextureObject(texObj); ////Free device memory // hipFreeArray(cuArray); // hipFree(output); //free(h_data); //delete[] output_h; /*********************texture reference*********************/ //Allocate CUDA array in device memory int width = 256; int height = 256; int angle = 30; int size = width * height; float *h_data; h_data = (float*)malloc(size * sizeof(float)); for(int i = 0; i < width; ++i) { for(int j = 0; j < height; ++j) { h_data[j * width + i] = (i + j + 2) / 2; } } print(h_data, 5 ,5); hipChannelFormatDesc channelDesc = //define texel type hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray *cuArray; checkCudaErrors(hipMallocArray(&cuArray, &channelDesc, width, height)); //Copy to device memory some data located at address h_data //in host memory checkCudaErrors(hipMemcpyToArray(cuArray, 0, 0, h_data, //for hipMemcpyToArray(), 0, 0--start loaction for copy size, hipMemcpyHostToDevice)); //Specify texture object parameters texRef.addressMode[0] = hipAddressModeWrap; texRef.addressMode[1] = hipAddressModeWrap; texRef.filterMode = hipFilterModeLinear; texRef.normalized = 1; //Bind the array to the texture reference checkCudaErrors(hipBindTextureToArray(texRef, cuArray, channelDesc)); //Allocate result of transformation in device memory float *output; checkCudaErrors(hipMalloc(&output, width * height * sizeof(float))); //Invoke kernel dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( transformKernelRef), dim3(dimGrid), dim3(dimBlock), 0, 0, output, width, height, angle); //read result from device float *output_h; //output_h = (float*)malloc(size * sizeof(float)); output_h = new float[size]; checkCudaErrors(hipMemcpy(output_h, output, size, hipMemcpyDeviceToHost)); print(output_h, 256, 10); //Free device memory hipFreeArray(cuArray); hipFree(output); free(h_data); delete[] output_h; return EXIT_SUCCESS; }
5a114877b2d732ce9d7758f9f56e5e0b678afde3.cu
/****************************************************** * * Program used to test texture object and reference API * * a : zhonghy * date: 2018-4-24 ********************************************************/ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include <iostream> #include <string> #include <vector> #include <stdio.h> //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); int print(float *, int, int); __global__ void transformKernel(float *, cudaTextureObject_t, int, int, float); //2D float texture texture<float, cudaTextureType2D, cudaReadModeElementType> texRef; int print(float *mat, int width, int height) { if(!mat) { return 0; } for(int i = 0; i < width; ++i) { for(int j = 0; j < height; ++j) { std::cout << mat[j * width + i] << " "; } std::cout << std::endl; } } //Simple transformation kernel __global__ void transformKernel(float *output, cudaTextureObject_t texObj, int width, int height, float theta) { //Calculate normalized texture coordinates //2D block unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; //transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; //Read from texture and write to global memory output[y * width + x] = tex2D<float>(texObj, tu, tv); } //Simple transfromation kernel __global__ void transformKernelRef(float *output, int width, int height, float theta) { //Calculate normalized texture coordinates //2D block unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; //transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; //Read from texture and write to global memory output[y * width + x] = tex2D<float>(texRef, tu, tv); } int main(int argc, char *argv[]) { /**********************texture object*********************/ ////Allocate CUDA array in device memory //int width = 256; //int height = 256; //int angle = 30; //int size = width * height; //float *h_data; //h_data = (float*)malloc(size * sizeof(float)); //for(int i = 0; i < width; ++i) //{ // for(int j = 0; j < height; ++j) // { // h_data[j * width + i] = (i + j + 2) / 2; // } //} //print(h_data, 5 ,5); //cudaChannelFormatDesc channelDesc = // cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); //cudaArray *cuArray; //checkCudaErrors(cudaMallocArray(&cuArray, &channelDesc, width, height)); ////Copy to device memory some data located at address h_data ////in host memory //checkCudaErrors(cudaMemcpyToArray(cuArray, 0, 0, h_data, // size, cudaMemcpyHostToDevice)); ////Specify texture //struct cudaResourceDesc resDesc; //memset(&resDesc, 0, sizeof(resDesc)); //resDesc.resType = cudaResourceTypeArray; //resDesc.res.array.array = cuArray; ////Specify texture object parameters //struct cudaTextureDesc texDesc; //memset(&texDesc, 0, sizeof(texDesc)); //texDesc.addressMode[0] = cudaAddressModeWrap; //texDesc.addressMode[1] = cudaAddressModeWrap; //texDesc.filterMode = cudaFilterModeLinear; //texDesc.readMode = cudaReadModeElementType; //texDesc.normalizedCoords = 1; ////Create texture object //cudaTextureObject_t texObj = 0; //cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); ////Allocate result of 
transformation in device memory //float *output; //checkCudaErrors(cudaMalloc(&output, width * height * sizeof(float))); ////Invoke kernel //dim3 dimBlock(16, 16); //dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, // (height + dimBlock.y - 1) / dimBlock.y); //transformKernel<<<dimGrid, dimBlock>>>(output, // texObj, width, height, angle); ////read result from device //float *output_h; ////output_h = (float*)malloc(size * sizeof(float)); //output_h = new float[size]; //checkCudaErrors(cudaMemcpy(output_h, output, size, cudaMemcpyDeviceToHost)); // print(output_h, 256, 10); ////Destory texture object //cudaDestroyTextureObject(texObj); ////Free device memory // cudaFreeArray(cuArray); // cudaFree(output); //free(h_data); //delete[] output_h; /*********************texture reference*********************/ //Allocate CUDA array in device memory int width = 256; int height = 256; int angle = 30; int size = width * height; float *h_data; h_data = (float*)malloc(size * sizeof(float)); for(int i = 0; i < width; ++i) { for(int j = 0; j < height; ++j) { h_data[j * width + i] = (i + j + 2) / 2; } } print(h_data, 5 ,5); cudaChannelFormatDesc channelDesc = //define texel type cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray *cuArray; checkCudaErrors(cudaMallocArray(&cuArray, &channelDesc, width, height)); //Copy to device memory some data located at address h_data //in host memory checkCudaErrors(cudaMemcpyToArray(cuArray, 0, 0, h_data, //for cudaMemcpyToArray(), 0, 0--start loaction for copy size, cudaMemcpyHostToDevice)); //Specify texture object parameters texRef.addressMode[0] = cudaAddressModeWrap; texRef.addressMode[1] = cudaAddressModeWrap; texRef.filterMode = cudaFilterModeLinear; texRef.normalized = 1; //Bind the array to the texture reference checkCudaErrors(cudaBindTextureToArray(texRef, cuArray, channelDesc)); //Allocate result of transformation in device memory float *output; checkCudaErrors(cudaMalloc(&output, width * height * sizeof(float))); //Invoke kernel dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); transformKernelRef<<<dimGrid, dimBlock>>>(output, width, height, angle); //read result from device float *output_h; //output_h = (float*)malloc(size * sizeof(float)); output_h = new float[size]; checkCudaErrors(cudaMemcpy(output_h, output, size, cudaMemcpyDeviceToHost)); print(output_h, 256, 10); //Free device memory cudaFreeArray(cuArray); cudaFree(output); free(h_data); delete[] output_h; return EXIT_SUCCESS; }
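The commented-out first half of main() sets up the texture-object API that replaces the deprecated texture-reference path the active code uses; note also that the copies pass size where a byte count such as size * sizeof(float) appears intended. The sketch below reduces the texture-object setup to its essential calls, using cudaMemcpy2DToArray with explicit byte pitches; the array size and sampling settings are illustrative assumptions.

#include <cstdio>
#include <cstring>
#include <vector>
#include <cuda_runtime.h>

int main() {
  const int width = 256, height = 256;             // illustrative size
  std::vector<float> h_data(width * height, 1.0f);

  cudaChannelFormatDesc desc =
      cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
  cudaArray_t cuArray = nullptr;
  cudaMallocArray(&cuArray, &desc, width, height);
  // Copy row by row with an explicit byte pitch.
  cudaMemcpy2DToArray(cuArray, 0, 0, h_data.data(), width * sizeof(float),
                      width * sizeof(float), height, cudaMemcpyHostToDevice);

  cudaResourceDesc resDesc;
  std::memset(&resDesc, 0, sizeof(resDesc));
  resDesc.resType = cudaResourceTypeArray;
  resDesc.res.array.array = cuArray;

  cudaTextureDesc texDesc;
  std::memset(&texDesc, 0, sizeof(texDesc));
  texDesc.addressMode[0] = cudaAddressModeWrap;
  texDesc.addressMode[1] = cudaAddressModeWrap;
  texDesc.filterMode = cudaFilterModeLinear;
  texDesc.readMode = cudaReadModeElementType;
  texDesc.normalizedCoords = 1;

  cudaTextureObject_t texObj = 0;
  cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
  std::printf("texture object created: %llu\n", (unsigned long long)texObj);

  cudaDestroyTextureObject(texObj);
  cudaFreeArray(cuArray);
  return 0;
}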
c1f697033be735a66a6cc2dc44d294697498116d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

#define FILENAME(line)                                                         \
  FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_count_64.cu",  \
                               line)

#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"

__global__ void
awkward_reduce_count_64_kernel(int64_t* toptr,
                               const bool* fromptr,
                               const int64_t* parents,
                               int64_t lenparents) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < lenparents) {
    toptr[parents[thread_id]]++;
  }
}

ERROR awkward_reduce_count_64(int64_t* toptr,
                              const bool* fromptr,
                              const int64_t* parents,
                              int64_t lenparents,
                              int64_t outlength) {
  HANDLE_ERROR(hipMemset(toptr, 0, sizeof(int64_t) * outlength));

  dim3 blocks_per_grid = blocks(lenparents);
  dim3 threads_per_block = threads(lenparents);

  hipLaunchKernelGGL((awkward_reduce_count_64_kernel),
                     dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
                     toptr, fromptr, parents, lenparents);

  return success();
}
c1f697033be735a66a6cc2dc44d294697498116d.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

#define FILENAME(line)                                                         \
  FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_count_64.cu",  \
                               line)

#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"

__global__ void
awkward_reduce_count_64_kernel(int64_t* toptr,
                               const bool* fromptr,
                               const int64_t* parents,
                               int64_t lenparents) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < lenparents) {
    toptr[parents[thread_id]]++;
  }
}

ERROR awkward_reduce_count_64(int64_t* toptr,
                              const bool* fromptr,
                              const int64_t* parents,
                              int64_t lenparents,
                              int64_t outlength) {
  HANDLE_ERROR(cudaMemset(toptr, 0, sizeof(int64_t) * outlength));

  dim3 blocks_per_grid = blocks(lenparents);
  dim3 threads_per_block = threads(lenparents);

  awkward_reduce_count_64_kernel<<<blocks_per_grid, threads_per_block>>>(
      toptr, fromptr, parents, lenparents);

  return success();
}
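In the kernel above, every thread whose parent maps to the same slot performs a non-atomic toptr[parents[thread_id]]++, so concurrent increments can be lost. The sketch below shows an atomic variant with a tiny host test; the kernel name, launch shape, and test data are illustrative assumptions, and this is not presented as the upstream library's implementation.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

// Same counting reduction, but each increment goes through atomicAdd so that
// threads sharing a parent index do not lose updates.
__global__ void reduce_count_64_atomic_kernel(int64_t* toptr,
                                              const int64_t* parents,
                                              int64_t lenparents) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < lenparents) {
    atomicAdd(reinterpret_cast<unsigned long long*>(&toptr[parents[thread_id]]),
              1ULL);
  }
}

int main() {
  const int64_t lenparents = 8, outlength = 2;
  const int64_t h_parents[lenparents] = {0, 0, 0, 1, 1, 1, 1, 0};

  int64_t *d_toptr = nullptr, *d_parents = nullptr;
  cudaMalloc(&d_toptr, outlength * sizeof(int64_t));
  cudaMalloc(&d_parents, lenparents * sizeof(int64_t));
  cudaMemset(d_toptr, 0, outlength * sizeof(int64_t));
  cudaMemcpy(d_parents, h_parents, lenparents * sizeof(int64_t),
             cudaMemcpyHostToDevice);

  reduce_count_64_atomic_kernel<<<1, 64>>>(d_toptr, d_parents, lenparents);

  int64_t h_toptr[outlength] = {0, 0};
  cudaMemcpy(h_toptr, d_toptr, outlength * sizeof(int64_t),
             cudaMemcpyDeviceToHost);
  std::printf("counts: %lld %lld\n", (long long)h_toptr[0], (long long)h_toptr[1]);

  cudaFree(d_toptr);
  cudaFree(d_parents);
  return 0;
}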
4623391d6e1a62e41a240877ea9d55976dfda69d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include "hip/hip_runtime.h" #include <device_launch_parameters.h> #include <stdlib.h> using namespace std; //variables globales #define num_filas_RGB 32 // num filas matriz RGB #define num_columnas_RGB 32 // num columnas matriz RGB #define width_RGB 32 // width matriz RGB #define num_filas_filtro 3 // num filas matriz Filtro #define num_columnas_filtro 3 // num columnas matriz Filtro #define width_filtro 3 // width matriz Filtro #define tile_width 2 // Declaracin de funciones void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]); // Suma matrices __global__ void sumaMatrices(int R[num_filas_RGB][num_columnas_RGB], int G[num_filas_RGB][num_columnas_RGB], int B[num_filas_RGB][num_columnas_RGB], int RGB[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //sumamos RGB[fila][columna] = R[fila][columna] + G[fila][columna] + B[fila][columna]; } // Matriz volteada 180 __global__ void matrizVolteada(int a[num_filas_filtro][num_columnas_filtro], int volteada[num_filas_filtro][num_columnas_filtro]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //matriz volteada // [0][0] <--> [2][2] || [1][1] <--> [1][1] if (fila == columna) { volteada[(num_filas_filtro - 1 - fila)][(num_columnas_filtro - 1 - columna)] = a[fila][columna]; } // [0][2] <--> [2][0] else if (((fila == 0) && (columna == (num_columnas_filtro - 1))) || ((fila == (num_filas_filtro - 1)) && (columna == 0))) { volteada[columna][fila] = a[fila][columna]; } // [0][1] <--> [2][1] else if (columna == 1) { volteada[abs(fila - (num_filas_filtro - 1))][columna] = a[fila][columna]; } // [1][0] <--> [1][2] else if (fila == 1) { volteada[fila][abs(columna - (num_columnas_filtro - 1))] = a[fila][columna]; } } // Convolucion __global__ void convolucion(int a[num_filas_RGB][num_columnas_RGB], int b[num_filas_filtro][num_columnas_filtro], int c[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //primera fila if (fila == 0) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][1] * b[1][2] --> el elemento de la derecha a[fila + 1][columna] * b[2][1] + // a[1][0] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[1][1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[0][14] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][15] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[1][14] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][a + 1] * 
b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[0][a - 1] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } //ultima fila else if (fila == (num_filas_RGB - 1)) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[14][0] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2]; // a[14][1] * b[2][2] --> el elemento de arriba a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[15][14] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][15] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2]; // a[14][14] * b[2][2] --> el elemento de arriba a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[15][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[14][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2]; // a[14][a - 1] * b[2][2] --> el elemento de arriba a la izquierda } } //cualquier otra fila else { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> 
el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } } //Main int main() { // ------------------------------------------------------------------ // 1 - Creamos 3 matrices para despues sumarlas y crear la matriz RGB // ------------------------------------------------------------------ int R[num_filas_RGB][num_columnas_RGB] = {}; // representa a Red crearMatriz(R); int G[num_filas_RGB][num_columnas_RGB] = {}; // representa a Green crearMatriz(G); int B[num_filas_RGB][num_columnas_RGB] = {}; // representa a Blue crearMatriz(B); int RGB[num_filas_RGB][num_columnas_RGB] = {}; // matriz RGB int(*r_)[width_RGB]; int(*g_)[width_RGB]; int(*b_)[width_RGB]; int(*rgb_)[width_RGB]; const int size_RGB = num_filas_RGB * num_columnas_RGB * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device hipMalloc((void**)&r_, size_RGB); hipMalloc((void**)&g_, size_RGB); hipMalloc((void**)&b_, size_RGB); hipMalloc((void**)&rgb_, size_RGB); // Asignamos R, G, B en la memoria del device hipMemcpy(r_, R, size_RGB, hipMemcpyHostToDevice); hipMemcpy(g_, G, size_RGB, hipMemcpyHostToDevice); hipMemcpy(b_, B, size_RGB, hipMemcpyHostToDevice); hipMemcpy(rgb_, RGB, size_RGB, hipMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimGrid(width_RGB / tile_width, width_RGB / tile_width); //cuntos bloques tengo en mi matriz --> 32/2=16 dim3 dimBlock(tile_width, tile_width); //cantidad de hilos por bloque --> 2x2 hilos // Invocamos al Kernell sumaMatrices << <dimGrid, dimBlock >> > (r_, g_, b_, rgb_); // Leemos RGB del device hipMemcpy(RGB, rgb_, size_RGB, hipMemcpyDeviceToHost); // Imprimimos la matriz a convolucionar cout << "Matriz a convolucionar: " << endl << endl; imprimir_matriz(RGB); // ------------------------------------------------------------------------ // 2 - Creamos la matriz FILTRO y despus conseguimos su matriz volteada // ------------------------------------------------------------------------ int Filtro[num_filas_filtro][num_columnas_filtro] = { {1,2,3},{4,5,6},{7,8,9} };// { {0,1,0},{1,1,1},{0,1,0} }; int Filtro_volt[num_filas_filtro][num_columnas_filtro]; int(*filtro_)[width_filtro]; int(*filtro_volt_)[width_filtro]; const int size_filtro = num_filas_filtro * num_columnas_filtro * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device hipMalloc((void**)&filtro_, size_filtro); hipMalloc((void**)&filtro_volt_, size_filtro); // Asignamos Filtro en la memoria del device hipMemcpy(filtro_, Filtro, size_filtro, hipMemcpyHostToDevice); hipMemcpy(filtro_volt_, Filtro_volt, size_filtro, hipMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimGrid_filtro(width_filtro / tile_width, width_filtro / tile_width); //cuntos bloques tengo en mi matriz --> 3/2=1,5 --> 1 bloque dim3 dimBlock_filtro(width_filtro, width_filtro); //cantidad de hilos por bloque --> 9 hilos // Imprimimos la matriz filtro cout << "Matriz filtro inicial: " << endl << endl; imprimir_matriz_filtro(Filtro); // Invocamos al 
Kernell matrizVolteada << <dimGrid_filtro, dimBlock_filtro >> > (filtro_, filtro_volt_); // Leemos Filtro_volt del device hipMemcpy(Filtro_volt, filtro_volt_, size_filtro, hipMemcpyDeviceToHost); // Imprimimos la matriz filtro volteada cout << "Matriz filtro volteada: " << endl << endl; imprimir_matriz_filtro(Filtro_volt); // -------------------------------------------------------------------------------- // 3 - Realizamos la convolución de la matriz tras haber creado la matriz resultado // -------------------------------------------------------------------------------- int Resultado[num_filas_RGB][num_columnas_RGB] = {}; int(*resultado_)[width_RGB]; // Reservamos memoria para la copia de la matriz que pasaremos por memoria del device hipMalloc((void**)&resultado_, size_RGB); // Asignamos Resultado, Filtro_volt, RGB en la memoria del device hipMemcpy(resultado_, Resultado, size_RGB, hipMemcpyHostToDevice); hipMemcpy(filtro_volt_, Filtro_volt, size_filtro, hipMemcpyHostToDevice); hipMemcpy(rgb_, RGB, size_RGB, hipMemcpyHostToDevice); // Invocamos al Kernell convolucion << <dimGrid, dimBlock >> > (rgb_, filtro_volt_, resultado_); // Leemos Resultado del device hipMemcpy(Resultado, resultado_, size_RGB, hipMemcpyDeviceToHost); // Imprimimos la matriz convolucionada cout << "Matriz convolucionada: " << endl << endl; imprimir_matriz(Resultado); // ----------------------------------------------------------------- // 4 - Liberamos memoria // ----------------------------------------------------------------- hipFree(r_); hipFree(g_); hipFree(b_); hipFree(rgb_); hipFree(filtro_); hipFree(filtro_volt_); return 0; } // Creamos la matriz void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]) { int nums_random = 255; //el rango es [0,255] for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { matriz[i][j] = (rand() % nums_random) + 1; } } } // Imprimir matrices 32x32 void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]) { for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_RGB - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; } // Imprimir matriz 3x3 void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]) { for (int i = 0; i < num_filas_filtro; i++) { for (int j = 0; j < num_columnas_filtro; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_filtro - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; }
4623391d6e1a62e41a240877ea9d55976dfda69d.cu
 #include <stdio.h> #include <iostream> #include "cuda_runtime.h" #include <device_launch_parameters.h> #include <stdlib.h> using namespace std; //variables globales #define num_filas_RGB 32 // num filas matriz RGB #define num_columnas_RGB 32 // num columnas matriz RGB #define width_RGB 32 // width matriz RGB #define num_filas_filtro 3 // num filas matriz Filtro #define num_columnas_filtro 3 // num columnas matriz Filtro #define width_filtro 3 // width matriz Filtro #define tile_width 2 // Declaración de funciones void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]); // Suma matrices __global__ void sumaMatrices(int R[num_filas_RGB][num_columnas_RGB], int G[num_filas_RGB][num_columnas_RGB], int B[num_filas_RGB][num_columnas_RGB], int RGB[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //sumamos RGB[fila][columna] = R[fila][columna] + G[fila][columna] + B[fila][columna]; } // Matriz volteada 180º __global__ void matrizVolteada(int a[num_filas_filtro][num_columnas_filtro], int volteada[num_filas_filtro][num_columnas_filtro]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //matriz volteada // [0][0] <--> [2][2] || [1][1] <--> [1][1] if (fila == columna) { volteada[(num_filas_filtro - 1 - fila)][(num_columnas_filtro - 1 - columna)] = a[fila][columna]; } // [0][2] <--> [2][0] else if (((fila == 0) && (columna == (num_columnas_filtro - 1))) || ((fila == (num_filas_filtro - 1)) && (columna == 0))) { volteada[columna][fila] = a[fila][columna]; } // [0][1] <--> [2][1] else if (columna == 1) { volteada[abs(fila - (num_filas_filtro - 1))][columna] = a[fila][columna]; } // [1][0] <--> [1][2] else if (fila == 1) { volteada[fila][abs(columna - (num_columnas_filtro - 1))] = a[fila][columna]; } } // Convolucion __global__ void convolucion(int a[num_filas_RGB][num_columnas_RGB], int b[num_filas_filtro][num_columnas_filtro], int c[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //primera fila if (fila == 0) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][1] * b[1][2] --> el elemento de la derecha a[fila + 1][columna] * b[2][1] + // a[1][0] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[1][1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[0][14] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][15] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[1][14] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * 
b[1][2] + // a[0][a - 1] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } //ultima fila else if (fila == (num_filas_RGB - 1)) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[14][0] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2]; // a[14][1] * b[2][2] --> el elemento de arriba a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[15][14] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][15] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2]; // a[14][14] * b[2][2] --> el elemento de arriba a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[15][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[14][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2]; // a[14][a - 1] * b[2][2] --> el elemento de arriba a la izquierda } } //cualquier otra fila else { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // 
a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } } //Main int main() { // ------------------------------------------------------------------ // 1 - Creamos 3 matrices para despues sumarlas y crear la matriz RGB // ------------------------------------------------------------------ int R[num_filas_RGB][num_columnas_RGB] = {}; // representa a Red crearMatriz(R); int G[num_filas_RGB][num_columnas_RGB] = {}; // representa a Green crearMatriz(G); int B[num_filas_RGB][num_columnas_RGB] = {}; // representa a Blue crearMatriz(B); int RGB[num_filas_RGB][num_columnas_RGB] = {}; // matriz RGB int(*r_)[width_RGB]; int(*g_)[width_RGB]; int(*b_)[width_RGB]; int(*rgb_)[width_RGB]; const int size_RGB = num_filas_RGB * num_columnas_RGB * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device cudaMalloc((void**)&r_, size_RGB); cudaMalloc((void**)&g_, size_RGB); cudaMalloc((void**)&b_, size_RGB); cudaMalloc((void**)&rgb_, size_RGB); // Asignamos R, G, B en la memoria del device cudaMemcpy(r_, R, size_RGB, cudaMemcpyHostToDevice); cudaMemcpy(g_, G, size_RGB, cudaMemcpyHostToDevice); cudaMemcpy(b_, B, size_RGB, cudaMemcpyHostToDevice); cudaMemcpy(rgb_, RGB, size_RGB, cudaMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimGrid(width_RGB / tile_width, width_RGB / tile_width); //cuántos bloques tengo en mi matriz --> 32/2=16 dim3 dimBlock(tile_width, tile_width); //cantidad de hilos por bloque --> 2x2 hilos // Invocamos al Kernell sumaMatrices << <dimGrid, dimBlock >> > (r_, g_, b_, rgb_); // Leemos RGB del device cudaMemcpy(RGB, rgb_, size_RGB, cudaMemcpyDeviceToHost); // Imprimimos la matriz a convolucionar cout << "Matriz a convolucionar: " << endl << endl; imprimir_matriz(RGB); // ------------------------------------------------------------------------ // 2 - Creamos la matriz FILTRO y después conseguimos su matriz volteada // ------------------------------------------------------------------------ int Filtro[num_filas_filtro][num_columnas_filtro] = { {1,2,3},{4,5,6},{7,8,9} };// { {0,1,0},{1,1,1},{0,1,0} }; int Filtro_volt[num_filas_filtro][num_columnas_filtro]; int(*filtro_)[width_filtro]; int(*filtro_volt_)[width_filtro]; const int size_filtro = num_filas_filtro * num_columnas_filtro * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device cudaMalloc((void**)&filtro_, size_filtro); cudaMalloc((void**)&filtro_volt_, size_filtro); // Asignamos Filtro en la memoria del device cudaMemcpy(filtro_, Filtro, size_filtro, cudaMemcpyHostToDevice); cudaMemcpy(filtro_volt_, Filtro_volt, size_filtro, cudaMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimGrid_filtro(width_filtro / tile_width, width_filtro / tile_width); //cuántos bloques tengo en mi matriz --> 3/2=1,5 --> 1 bloque dim3 dimBlock_filtro(width_filtro, width_filtro); //cantidad de hilos por bloque --> 9 hilos // Imprimimos la matriz filtro cout << "Matriz filtro inicial: " << endl << endl; imprimir_matriz_filtro(Filtro); // Invocamos al Kernell matrizVolteada << <dimGrid_filtro, 
dimBlock_filtro >> > (filtro_, filtro_volt_); // Leemos Filtro_volt del device cudaMemcpy(Filtro_volt, filtro_volt_, size_filtro, cudaMemcpyDeviceToHost); // Imprimimos la matriz filtro volteada cout << "Matriz filtro volteada: " << endl << endl; imprimir_matriz_filtro(Filtro_volt); // -------------------------------------------------------------------------------- // 3 - Realizamos la convolución de la matriz tras haber creado la matriz resultado // -------------------------------------------------------------------------------- int Resultado[num_filas_RGB][num_columnas_RGB] = {}; int(*resultado_)[width_RGB]; // Reservamos memoria para la copia de la matriz que pasaremos por memoria del device cudaMalloc((void**)&resultado_, size_RGB); // Asignamos Resultado, Filtro_volt, RGB en la memoria del device cudaMemcpy(resultado_, Resultado, size_RGB, cudaMemcpyHostToDevice); cudaMemcpy(filtro_volt_, Filtro_volt, size_filtro, cudaMemcpyHostToDevice); cudaMemcpy(rgb_, RGB, size_RGB, cudaMemcpyHostToDevice); // Invocamos al Kernell convolucion << <dimGrid, dimBlock >> > (rgb_, filtro_volt_, resultado_); // Leemos Resultado del device cudaMemcpy(Resultado, resultado_, size_RGB, cudaMemcpyDeviceToHost); // Imprimimos la matriz convolucionada cout << "Matriz convolucionada: " << endl << endl; imprimir_matriz(Resultado); // ----------------------------------------------------------------- // 4 - Liberamos memoria // ----------------------------------------------------------------- cudaFree(r_); cudaFree(g_); cudaFree(b_); cudaFree(rgb_); cudaFree(filtro_); cudaFree(filtro_volt_); return 0; } // Creamos la matriz void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]) { int nums_random = 255; //el rango es [0,255] for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { matriz[i][j] = (rand() % nums_random) + 1; } } } // Imprimir matrices 32x32 void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]) { for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_RGB - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; } // Imprimir matriz 3x3 void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]) { for (int i = 0; i < num_filas_filtro; i++) { for (int j = 0; j < num_columnas_filtro; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_filtro - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; }
0b94bf941fe02f6de38420829b2e8cd421c331c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/hip/ApplyGridUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> namespace at::native { // ----------------------------------- // glu forward // ----------------------------------- void glu_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a_, scalar_t b_) -> scalar_t { const opmath_t a = a_; const opmath_t b = b_; const opmath_t one = opmath_t(1); const opmath_t sigmoid = one / (one + ::exp(-b)); return a * sigmoid; }); }); } // ----------------------------------- // glu forward ad // ----------------------------------- void glu_jvp_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel( iter, [] GPU_LAMBDA( scalar_t res_, scalar_t b_, scalar_t da_, scalar_t db_) -> scalar_t { const opmath_t res = res_; const opmath_t b = b_; const opmath_t da = da_; const opmath_t db = db_; const opmath_t one = opmath_t(1); const opmath_t sig_b = one / (one + ::exp(-b)); return (da * sig_b + res * (db - sig_b * db)); }); }); } // ----------------------------------- // glu backward // ----------------------------------- // Byte offsets don't require multiplication by sizeof(T), so are slightly // cheaper. For fixed offsets, this removes all penalty from 64-bit indexing. template <typename T> __device__ T* byte_offset(T* ptr, int64_t offset) { using byte_ptr_t = typename std:: conditional<std::is_const<T>::value, const char*, char*>::type; return reinterpret_cast<T*>(reinterpret_cast<byte_ptr_t>(ptr) + offset); } template <typename scalar_t, typename OffsetCalc> __global__ void glu_backward_kernel( int numel, scalar_t* gI, const scalar_t* I, const scalar_t* gO, OffsetCalc offset_calculator, int64_t gI_byte_offset, int64_t I_byte_offset) { using opmath_t = at::opmath_type<scalar_t>; const uint32_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= numel) { return; } const auto offsets = offset_calculator.get(linear_index); // We explicitly iterate over the first half of the input tensor, and // gI_byte_offset and I_byte_offset are the offsets to access the // corresponding index in the second half of the tensor. 
const opmath_t a = I[offsets[1]]; const opmath_t b = *byte_offset(I + offsets[1], I_byte_offset); const opmath_t gO_val = gO[offsets[2]]; const auto one = opmath_t(1); const opmath_t sigmoid = one / (one + ::exp(-b)); auto* gA = gI + offsets[0]; *gA = sigmoid * gO_val; auto* gB = byte_offset(gA, gI_byte_offset); *gB = (one - sigmoid) * sigmoid * gO_val * a; } void launch_glu_backward_kernel( const TensorIteratorBase& iter, int64_t gI_stride, int64_t I_stride) { const auto N = iter.numel(); TORCH_INTERNAL_ASSERT_DEBUG_ONLY( N > 0 && N <= std::numeric_limits<int32_t>::max()); const auto offset_calculator = make_element_offset_calculator<3>(iter); constexpr int64_t block_size = 256; const int64_t grid = (N + block_size - 1) / block_size; const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "glu_backward_cuda", [&] { auto gI = static_cast<scalar_t*>(iter.data_ptr(0)); auto I = static_cast<const scalar_t*>(iter.data_ptr(1)); auto gO = static_cast<const scalar_t*>(iter.data_ptr(2)); hipLaunchKernelGGL(( glu_backward_kernel), dim3(grid), dim3(block_size), 0, stream, N, gI, I, gO, offset_calculator, gI_stride * sizeof(scalar_t), I_stride * sizeof(scalar_t)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } REGISTER_DISPATCH(glu_stub, &glu_kernel); REGISTER_DISPATCH(glu_jvp_stub, &glu_jvp_kernel); } // namespace at::native
0b94bf941fe02f6de38420829b2e8cd421c331c0.cu
#define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/cuda/ApplyGridUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> namespace at::native { // ----------------------------------- // glu forward // ----------------------------------- void glu_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a_, scalar_t b_) -> scalar_t { const opmath_t a = a_; const opmath_t b = b_; const opmath_t one = opmath_t(1); const opmath_t sigmoid = one / (one + std::exp(-b)); return a * sigmoid; }); }); } // ----------------------------------- // glu forward ad // ----------------------------------- void glu_jvp_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel( iter, [] GPU_LAMBDA( scalar_t res_, scalar_t b_, scalar_t da_, scalar_t db_) -> scalar_t { const opmath_t res = res_; const opmath_t b = b_; const opmath_t da = da_; const opmath_t db = db_; const opmath_t one = opmath_t(1); const opmath_t sig_b = one / (one + std::exp(-b)); return (da * sig_b + res * (db - sig_b * db)); }); }); } // ----------------------------------- // glu backward // ----------------------------------- // Byte offsets don't require multiplication by sizeof(T), so are slightly // cheaper. For fixed offsets, this removes all penalty from 64-bit indexing. template <typename T> __device__ T* byte_offset(T* ptr, int64_t offset) { using byte_ptr_t = typename std:: conditional<std::is_const<T>::value, const char*, char*>::type; return reinterpret_cast<T*>(reinterpret_cast<byte_ptr_t>(ptr) + offset); } template <typename scalar_t, typename OffsetCalc> __global__ void glu_backward_kernel( int numel, scalar_t* gI, const scalar_t* I, const scalar_t* gO, OffsetCalc offset_calculator, int64_t gI_byte_offset, int64_t I_byte_offset) { using opmath_t = at::opmath_type<scalar_t>; const uint32_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= numel) { return; } const auto offsets = offset_calculator.get(linear_index); // We explicitly iterate over the first half of the input tensor, and // gI_byte_offset and I_byte_offset are the offsets to access the // corresponding index in the second half of the tensor. 
const opmath_t a = I[offsets[1]]; const opmath_t b = *byte_offset(I + offsets[1], I_byte_offset); const opmath_t gO_val = gO[offsets[2]]; const auto one = opmath_t(1); const opmath_t sigmoid = one / (one + std::exp(-b)); auto* gA = gI + offsets[0]; *gA = sigmoid * gO_val; auto* gB = byte_offset(gA, gI_byte_offset); *gB = (one - sigmoid) * sigmoid * gO_val * a; } void launch_glu_backward_kernel( const TensorIteratorBase& iter, int64_t gI_stride, int64_t I_stride) { const auto N = iter.numel(); TORCH_INTERNAL_ASSERT_DEBUG_ONLY( N > 0 && N <= std::numeric_limits<int32_t>::max()); const auto offset_calculator = make_element_offset_calculator<3>(iter); constexpr int64_t block_size = 256; const int64_t grid = (N + block_size - 1) / block_size; const auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "glu_backward_cuda", [&] { auto gI = static_cast<scalar_t*>(iter.data_ptr(0)); auto I = static_cast<const scalar_t*>(iter.data_ptr(1)); auto gO = static_cast<const scalar_t*>(iter.data_ptr(2)); glu_backward_kernel<<<grid, block_size, 0, stream>>>( N, gI, I, gO, offset_calculator, gI_stride * sizeof(scalar_t), I_stride * sizeof(scalar_t)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } REGISTER_DISPATCH(glu_stub, &glu_kernel); REGISTER_DISPATCH(glu_jvp_stub, &glu_jvp_kernel); } // namespace at::native
3dbcf517a296364f4975f9e52794be6b240fe162.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cmath> #include <stdio.h> #include <string.h> __device__ __constant__ float D_H[ 3*3 ]; __device__ float norm(float val, int length) { float mean = length/2; float std = length/2; return (val-mean)/std; } __device__ float unorm(float val, int length) { float mean = length/2; float std = length/2; return val*std + mean; } __device__ void projectedCoord(int x, int y, int *xp, int *yp, int xlen, int ylen) { //printf("%d, %d \n", x, y); //NORMALIZE INPUT float nx = norm(x,xlen); float ny = norm(y,ylen); //printf("%f, %f \n", nx, ny); int sH = 3; float w = 1; //Assume that the projection starts from y=1 float hx = nx*D_H[ sH*0+0] + ny*D_H[ sH*0+1 ] + w*D_H[ sH*0+2 ]; float hy = nx*D_H[ sH*1+0] + ny*D_H[ sH*1+1 ] + w*D_H[ sH*1+2 ]; float hw = nx*D_H[ sH*2+0] + ny*D_H[ sH*2+1 ] + w*D_H[ sH*2+2 ]; //printf("%f, %f, %f\n",D_H[ sH*0+0], D_H[ sH*0+1], D_H[ sH*0+2]); //printf("%f, %f, %f\n",D_H[ sH*1+0], D_H[ sH*1+1], D_H[ sH*1+2]); //printf("%f, %f, %f\n",D_H[ sH*2+0], D_H[ sH*2+1], D_H[ sH*2+2]); //printf("%f %f %f \n", hx, hy, hw); //Unormalize Output *xp = unorm(hx/hw, xlen); *yp = unorm(hy/hw, ylen); //printf("%d, %d \n", *xp, *yp); } __device__ int im_idx(int r, int c, int width, int channels) { return channels*(width*r+c); } __device__ bool val_rc(int r, int c, int width, int height) { return r>=0 && r<height && c>=0 && c<width; } __global__ void proj_sub_tresh(unsigned char* img0, unsigned char* img1, unsigned char* out_img, int Width, int Height) { const unsigned int c = ( (blockDim.y * blockIdx.y) + threadIdx.y ); const unsigned int r = ( (blockDim.x * blockIdx.x) + threadIdx.x ); const unsigned int treshold = 60; const unsigned int ch = 3; //Channel const unsigned int s = sizeof(unsigned char); const unsigned int W = Width; int o_img_idx; int i_img_idx; unsigned int subval, subval0, subval1, subval2; int rp; int cp; //Projection, Background Sub, Treshold // Not sure why I wrote the matrix in this manner where the r column is reversed using x,y notation // Need to look deeper into and be fixed projectedCoord(c,r, &cp, &rp, Width, Height); //printf("%d, %d \n", rp, cp); if ( val_rc(rp,cp, Width, Height) && val_rc(r,c, Width, Height) ) { o_img_idx = im_idx(r,c, Width,ch); i_img_idx = im_idx(rp,cp, Width,ch); subval0 = abs( img1[ o_img_idx+0 ] - img0[ i_img_idx+0 ] ); subval1 = abs( img1[ o_img_idx+1 ] - img0[ i_img_idx+1 ] ); subval2 = abs( img1[ o_img_idx+2 ] - img0[ i_img_idx+2 ] ); subval = .21265*subval0 + .7152*subval1 + .0722*subval2; if (subval > treshold) { out_img[ o_img_idx+0 ] = subval; out_img[ o_img_idx+1 ] = subval; out_img[ o_img_idx+2 ] = subval; } //out_img[ o_img_idx+0 ] = img0[ i_img_idx+0]; //out_img[ o_img_idx+1 ] = img0[ i_img_idx+1]; //out_img[ o_img_idx+2 ] = img0[ i_img_idx+2]; } }
3dbcf517a296364f4975f9e52794be6b240fe162.cu
#include <iostream> #include <cmath> #include <stdio.h> #include <string.h> __device__ __constant__ float D_H[ 3*3 ]; __device__ float norm(float val, int length) { float mean = length/2; float std = length/2; return (val-mean)/std; } __device__ float unorm(float val, int length) { float mean = length/2; float std = length/2; return val*std + mean; } __device__ void projectedCoord(int x, int y, int *xp, int *yp, int xlen, int ylen) { //printf("%d, %d \n", x, y); //NORMALIZE INPUT float nx = norm(x,xlen); float ny = norm(y,ylen); //printf("%f, %f \n", nx, ny); int sH = 3; float w = 1; //Assume that the projection starts from y=1 float hx = nx*D_H[ sH*0+0] + ny*D_H[ sH*0+1 ] + w*D_H[ sH*0+2 ]; float hy = nx*D_H[ sH*1+0] + ny*D_H[ sH*1+1 ] + w*D_H[ sH*1+2 ]; float hw = nx*D_H[ sH*2+0] + ny*D_H[ sH*2+1 ] + w*D_H[ sH*2+2 ]; //printf("%f, %f, %f\n",D_H[ sH*0+0], D_H[ sH*0+1], D_H[ sH*0+2]); //printf("%f, %f, %f\n",D_H[ sH*1+0], D_H[ sH*1+1], D_H[ sH*1+2]); //printf("%f, %f, %f\n",D_H[ sH*2+0], D_H[ sH*2+1], D_H[ sH*2+2]); //printf("%f %f %f \n", hx, hy, hw); //Unormalize Output *xp = unorm(hx/hw, xlen); *yp = unorm(hy/hw, ylen); //printf("%d, %d \n", *xp, *yp); } __device__ int im_idx(int r, int c, int width, int channels) { return channels*(width*r+c); } __device__ bool val_rc(int r, int c, int width, int height) { return r>=0 && r<height && c>=0 && c<width; } __global__ void proj_sub_tresh(unsigned char* img0, unsigned char* img1, unsigned char* out_img, int Width, int Height) { const unsigned int c = ( (blockDim.y * blockIdx.y) + threadIdx.y ); const unsigned int r = ( (blockDim.x * blockIdx.x) + threadIdx.x ); const unsigned int treshold = 60; const unsigned int ch = 3; //Channel const unsigned int s = sizeof(unsigned char); const unsigned int W = Width; int o_img_idx; int i_img_idx; unsigned int subval, subval0, subval1, subval2; int rp; int cp; //Projection, Background Sub, Treshold // Not sure why I wrote the matrix in this manner where the r column is reversed using x,y notation // Need to look deeper into and be fixed projectedCoord(c,r, &cp, &rp, Width, Height); //printf("%d, %d \n", rp, cp); if ( val_rc(rp,cp, Width, Height) && val_rc(r,c, Width, Height) ) { o_img_idx = im_idx(r,c, Width,ch); i_img_idx = im_idx(rp,cp, Width,ch); subval0 = abs( img1[ o_img_idx+0 ] - img0[ i_img_idx+0 ] ); subval1 = abs( img1[ o_img_idx+1 ] - img0[ i_img_idx+1 ] ); subval2 = abs( img1[ o_img_idx+2 ] - img0[ i_img_idx+2 ] ); subval = .21265*subval0 + .7152*subval1 + .0722*subval2; if (subval > treshold) { out_img[ o_img_idx+0 ] = subval; out_img[ o_img_idx+1 ] = subval; out_img[ o_img_idx+2 ] = subval; } //out_img[ o_img_idx+0 ] = img0[ i_img_idx+0]; //out_img[ o_img_idx+1 ] = img0[ i_img_idx+1]; //out_img[ o_img_idx+2 ] = img0[ i_img_idx+2]; } }
7b1bc1686d0f5c53d58a6fe1a8443aae47ba35e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <stdlib.h> #include <cstring> #include <limits> // radi definiranja beskonačnosti #include <ctime> // radi mjerenja vremena izvršavanja #include <cmath> // radi "strop" funkcije using namespace std; /* Definiramo beskonačnost kao najveći mogući integer broj. */ #define infty std::numeric_limits<int>::max() void printMatrix (int* G, unsigned int dim) { cout << "\r\n"; for (int i = 0; i < dim*dim; i++) { if (G[i] < infty) { cout << G[i] << "\t"; } else { cout << "∞" << "\t"; } /* Ako je ispisao sve za jedan vrh, prijeđi na sljedeći u novi redak. */ if ((i+1)%dim == 0) { cout << "\r\n"; } } } /* Kernel za device koji implementira prvu fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase1(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; int* sh_D = &shrd[0]; int* sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int i = B*primaryBlockDim + threadIdx.y; int j = B*primaryBlockDim + threadIdx.x; int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && i < max(sub_dim, dim) && j < max(sub_dim, dim)) { sh_D[sub_i*sub_dim+sub_j] = D[i*dim+j]; sh_PI[sub_i*sub_dim+sub_j] = PI[i*dim+j]; } else { sh_D[sub_i*sub_dim+sub_j] = INT_MAX; sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); if (i < max(sub_dim, dim) && j < max(sub_dim, dim) && sh_D[sub_i*sub_dim+sub_k] < INT_MAX && sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (sh_D[sub_i*sub_dim+sub_j] > sh_D[sub_i*sub_dim+sub_k] + sh_D[sub_k*sub_dim+sub_j]) { sh_D[sub_i*sub_dim+sub_j] = sh_D[sub_i*sub_dim+sub_k] + sh_D[sub_k*sub_dim+sub_j]; sh_PI[sub_i*sub_dim+sub_j] = sh_PI[sub_k*sub_dim+sub_j]; } } } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && i < max(sub_dim, dim) && j < max(sub_dim, dim)) { D[i*dim+j] = sh_D[sub_i*sub_dim+sub_j]; PI[i*dim+j] = sh_PI[sub_i*sub_dim+sub_j]; } } /* Kernel za device koji implementira drugu fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase2(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; /* Sve varijable koje imaju prefiks "p_" pripadaju primarnom podbloku, sve koje imaju "c_" pripadaju trenutnom bloku. */ int* p_sh_D = &shrd[0]; int* p_sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int* c_sh_D = &shrd[2*primaryBlockDim*primaryBlockDim]; int* c_sh_PI = &shrd[3*primaryBlockDim*primaryBlockDim]; int p_i = B*primaryBlockDim + threadIdx.y; int p_j = B*primaryBlockDim + threadIdx.x; /* Ako je trenutni blok prije primarnog, skipCenterBlock biti će 0. Inače, ako je primarni ili neki nakon njega, biti će 1. */ int skipCenterBlock = min((blockIdx.x+1)/(B+1), 1); int c_i, c_j; /* Ako je y koordinata bloka u gridu jednaka 0, onda on pripada istom retku kao i primarni blok. Ako je y koordinata bloka u gridu jednaka 1, pripada istom stupcu kao i primarni blok. 
*/ if (blockIdx.y == 0) { c_i = p_i; c_j = (blockIdx.x+skipCenterBlock)*primaryBlockDim + threadIdx.x; } else { c_i = (blockIdx.x+skipCenterBlock)*primaryBlockDim + threadIdx.y; c_j = p_j; } int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); p_sh_D[sub_i*sub_dim+sub_j] = D[p_i*dim+p_j]; p_sh_PI[sub_i*sub_dim+sub_j] = PI[p_i*dim+p_j]; if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { c_sh_D[sub_i*sub_dim+sub_j] = D[c_i*dim+c_j]; c_sh_PI[sub_i*sub_dim+sub_j] = PI[c_i*dim+c_j]; } else { c_sh_D[sub_i*sub_dim+sub_j] = INT_MAX; c_sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); /* Pripada istom stupcu kao i primarni blok. */ if (blockIdx.y == 1) { if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && c_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && p_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > c_sh_D[sub_i*sub_dim+sub_k] + p_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = c_sh_D[sub_i*sub_dim+sub_k] + p_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = p_sh_PI[sub_k*sub_dim+sub_j]; } } } /* Pripada istom retku kao i primarni blok. */ if (blockIdx.y == 0) { if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && p_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && c_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > p_sh_D[sub_i*sub_dim+sub_k] + c_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = p_sh_D[sub_i*sub_dim+sub_k] + c_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = c_sh_PI[sub_k*sub_dim+sub_j]; } } } __syncthreads(); } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { D[c_i*dim+c_j] = c_sh_D[sub_i*sub_dim+sub_j]; PI[c_i*dim+c_j] = c_sh_PI[sub_i*sub_dim+sub_j]; } } /* Kernel za device koji implementira treću fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase3(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; /* Sve varijable koje imaju prefiks "p1_" pripadaju primarnom podbloku 1 izračunatom u fazi 2, sve koje imaju prefiks "p2_" pripadaju primarnom podbloku 2 izračunatom u fazi 2, a sve koje imaju "c_" pripadaju trenutnom bloku. */ int* p1_sh_D = &shrd[0]; int* p1_sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int* p2_sh_D = &shrd[2*primaryBlockDim*primaryBlockDim]; int* p2_sh_PI = &shrd[3*primaryBlockDim*primaryBlockDim]; int* c_sh_D = &shrd[4*primaryBlockDim*primaryBlockDim]; int* c_sh_PI = &shrd[5*primaryBlockDim*primaryBlockDim]; /* Ako je trenutni blok prije primarnog, skipCenterBlock biti će 0. Inače, ako je primarni ili neki nakon njega, biti će 1. U ovoj fazi to radimo po obje osi. 
*/ int skipCenterBlockX = min((blockIdx.x+1)/(B+1), 1); int skipCenterBlockY = min((blockIdx.y+1)/(B+1), 1); int c_i = (blockIdx.y+skipCenterBlockY)*primaryBlockDim + threadIdx.y; int c_j = (blockIdx.x+skipCenterBlockX)*primaryBlockDim + threadIdx.x; int p1_i = c_i; int p1_j = B*primaryBlockDim + threadIdx.x; int p2_i = B*primaryBlockDim + threadIdx.y; int p2_j = c_j; int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); p1_sh_D[sub_i*sub_dim+sub_j] = D[p1_i*dim+p1_j]; p1_sh_PI[sub_i*sub_dim+sub_j] = PI[p1_i*dim+p1_j]; p2_sh_D[sub_i*sub_dim+sub_j] = D[p2_i*dim+p2_j]; p2_sh_PI[sub_i*sub_dim+sub_j] = PI[p2_i*dim+p2_j]; if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < dim && c_j < dim) { c_sh_D[sub_i*sub_dim+sub_j] = D[c_i*dim+c_j]; c_sh_PI[sub_i*sub_dim+sub_j] = PI[c_i*dim+c_j]; } else { c_sh_D[sub_i*sub_dim+sub_j] = INT_MAX; c_sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && p1_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && p2_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > p1_sh_D[sub_i*sub_dim+sub_k] + p2_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = p1_sh_D[sub_i*sub_dim+sub_k] + p2_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = p2_sh_PI[sub_k*sub_dim+sub_j]; } } __syncthreads(); } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { D[c_i*dim+c_j] = c_sh_D[sub_i*sub_dim+sub_j]; PI[c_i*dim+c_j] = c_sh_PI[sub_i*sub_dim+sub_j]; } } void Blocked_Floyd_Warshall_Cuda (int* W, int* D, int* PI, unsigned int dim) { unsigned int n = dim*dim; /* Error varijabla za handleanje CUDA errora. */ hipError_t err = hipSuccess; /* Alociranje device varijabli matrica D i PI. */ int* d_D = NULL; err = hipMalloc((void**) &d_D, n*sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "Neuspješno alociranje matrice D (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int* d_PI = NULL; err = hipMalloc((void**) &d_PI, n*sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "Neuspješno alociranje matrice PI (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Kopiranje podataka iz host matrica u device. */ err = hipMemcpy(d_D, D, n*sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice D iz hosta u device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_PI, PI, n*sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice PI iz hosta u device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Pozivanje CUDA kernela. */ int blockDim = 25; /* Ukoliko je dimenzija bloka 32, imamo 1024 threadova po bloku. U blocked verziji Floyd-Warshall algoritma ovaj broj ovisi o tome koliko threadova se može pokrenuti u jednom bloku na nekoj grafičkoj kartici, ali i o tome koliko shared memorije po multiprocesoru ima dotična grafička kartica. Konkretno, za Teslu C2050/C2070 na kakvoj je rađen ovaj rad, maksimalno je 1024 threada po bloku, te je maksimalna shareana memorija multiprocesora 48kb, što bi značilo da možemo spremiti 4096 int elementa (jer u fazi 3 nam trebaju 3 bloka pa zapravo imamo 16kb po bloku za spremanje integera). 
Kako moramo gledati broj koji je manji (između 1024 zbog threadova/bloku i 4096 zbog shareane memorije), algoritam ćemo pokretati tako da je jedan blok veličine najviše 32*32, pri čemu se za veličinu bloka uzima prvi broj manji ili jednak broju 32 s kojim je dijeljiv broj vrhova grafa. */ int numberOfBlocks = ceil((float)dim/(float)blockDim); /* Broj (primarnih) blokova u blocked Floyd-Warshall algoritmu. */ cout << "Blocked Floyd-Warshall algoritam se pokreće sa " << numberOfBlocks << " primarna bloka po dijagonali.\r\n"; cout << "CUDA kerneli se pokreću kako slijedi: \r\n \t Faza 1: grid dimenzije 1x1 \r\n"; cout << "\t Faza 2: grid dimenzije " << numberOfBlocks-1 << "x2"; if (numberOfBlocks-1 == 0) cout << " (Faza 2 se neće izvršiti zbog dimenzija grida)"; cout << "\r\n"; cout << "\t Faza 3: grid dimenzije " << numberOfBlocks-1 << "x" << numberOfBlocks-1; if (numberOfBlocks-1 == 0) cout << " (Faza 3 se neće izvršiti zbog dimenzija grida)"; cout << "\r\n"; cout << "Svi blokovi se pokreću s " << blockDim*blockDim << " threada po bloku.\r\n"; /* Iteriranje po blokovima radimo na CPU, ostalo paraleliziramo. */ for (int B = 0; B < numberOfBlocks; B++) { /* Veličina shared memorije je blockDim*blockDim za matricu D i za matricu PI. */ hipLaunchKernelGGL(( FW_Cuda_Phase1), dim3(dim3(1, 1, 1)), dim3(dim3(blockDim, blockDim, 1)), 2*blockDim*blockDim*sizeof(int), 0, B, d_D, d_PI, dim, blockDim); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase1 (error code %s)!\n", hipGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } hipDeviceSynchronize(); /* Veličina shared memorije je blockDim*blockDim za primarnu matricu D, trenutnu matricu D, primarnu i trenutnu za matricu PI. */ if (numberOfBlocks-1 > 0) { hipLaunchKernelGGL(( FW_Cuda_Phase2), dim3(dim3(numberOfBlocks-1, 2, 1)), dim3(dim3(blockDim, blockDim, 1)), 4*blockDim*blockDim*sizeof(int), 0, B, d_D, d_PI, dim, blockDim); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase2 (error code %s)!\n", hipGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } } hipDeviceSynchronize(); /* Veličina shared memorije je blockDim*blockDim za trenutnu matricu D, dvije primarne matrice D izračunate u fazi 2, te za pripadne matrice PI. */ if (numberOfBlocks-1 > 0) { hipLaunchKernelGGL(( FW_Cuda_Phase3), dim3(dim3(numberOfBlocks-1, numberOfBlocks-1, 1)), dim3(dim3(blockDim, blockDim, 1)), 6*blockDim*blockDim*sizeof(int), 0, B, d_D, d_PI, dim, blockDim); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase3 (error code %s)!\n", hipGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } } /* Sinkronizacija threadova kako bi se završila B-ta iteracija, te kako bi se prešlo na (B+1). iteraciju. */ hipDeviceSynchronize(); } /* Kopiranje podataka iz device matrica u host. */ err = hipMemcpy(D, d_D, n*sizeof(int), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice D iz devicea u host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(PI, d_PI, n*sizeof(int), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice PI iz devicea u host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Dealociranje device varijabli matrica D i PI. 
*/ err = hipFree(d_D); if (err != hipSuccess) { fprintf(stderr, "Neuspješno dealociranje matrice D (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_PI); if (err != hipSuccess) { fprintf(stderr, "Neuspješno dealociranje matrice PI (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Reset CUDA devicea i završavanje CUDA Floyd-Warshalla. */ err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Neuspješno resetiranje devicea (završavanje sa CUDA FW, priprema za sljedeće pokretanje)! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } /* Metoda koja rekonstruira težinu najkraćeg puta za dani par vrhova koristeći matricu prethodnika PI i matricu inicijalnih težina W. */ int getPath (int* W, int* PI, int i, int j, unsigned int dim) { if (i == j) { return 0; } else if (PI[i*dim+j] == -1) { return infty; } else { int recursivePath = getPath(W, PI, i, PI[i*dim+j], dim); if (recursivePath < infty) { return recursivePath + W[PI[i*dim+j]*dim+j]; } else { return infty; } } } /* Za svaki par vrhova pokreće getPath metodu koja rekonstruira težinu najkraćeg puta između njih koristeći matricu prethodnika PI. Tu težinu onda uspoređuje sa dobivenom težinom za isti par vrhova u matrici najkraćih putova D. */ bool checkSolutionCorrectness (int* W, int* D, int* PI, unsigned int dim) { for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { if (getPath(W, PI, i, j, dim) != D[i*dim+j]) { return false; } } } return true; } int main() { /* V - broj vrhova E - broj bridova u - prvi vrh pri učitavanju grafa iz datoteke v - drugi vrh pri učitavanju grafa iz datoteke w - težina između v1 i v2 pri učitavanju grafa iz datoteke */ unsigned int V, E; int u, v, w; ifstream inputGraphFile; inputGraphFile.open("graphFile.txt"); ofstream outputFile; outputFile.open("output_cuda_blocked.txt"); inputGraphFile >> V >> E; cout << "V = " << V << ", E = " << E << "\r\n"; unsigned int n = V*V; /* Inicijalizacija grafova u memoriji. */ int* W = (int*)malloc(n*sizeof(int)); int* D = (int*)malloc(n*sizeof(int)); int* PI = (int*)malloc(n*sizeof(int)); /* Postavljanje inicijalnih vrijednosti za matricu prethodnika PI(0), matricu težina W i matricu najkraćih putova D(0). */ fill_n(W, n, infty); fill_n(PI, n, -1); for (int i = 0; i < E; i++) { inputGraphFile >> u >> v >> w; //cout << u << " <-- " << w << " --> " << v << "\r\n"; W[u*V+v] = w; if (u != v) { PI[u*V+v] = u; } } for (int i = 0; i < V; i++) { W[i*V+i] = 0; } /* D(0) = W na početku. */ memcpy (D, W, n*sizeof(int)); // printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V); /* Početak mjerenja izvršavanja Floyd-Warshall algoritma. */ clock_t begin = clock(); /* Pozivamo Floyd-Warshall CPU algoritam nad učitanim grafom. */ Blocked_Floyd_Warshall_Cuda(W, D, PI, V); /* Kraj mjerenja izvršavanja Floyd-Warshall algoritma. */ clock_t end = clock(); double elapsedTime = double(end - begin) / CLOCKS_PER_SEC; //printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V); /* Ispis rezultata u datoteku. 
*/ outputFile << "|V| = " << V << ", |E| = " << E << "\r\n\r\n"; for (int i = 0; i < n; i++) { if (i%V==0) outputFile << "\r\n"; if (D[i] < infty) outputFile << D[i] << "\t"; else outputFile << "∞" << "\t"; } outputFile << "\r\n\r\n"; for (int i = 0; i < n; i++) { if (i%V==0) outputFile << "\r\n"; outputFile << PI[i] << "\t"; } cout << "Vrijeme izvršavanja Blocked Floyd-Warshall algoritma: " << elapsedTime << "s.\r\n"; if (checkSolutionCorrectness(W, D, PI, V) == true) cout << "Svi najkraći putevi su točno izračunati!\r\n"; else cout << "Najkraći putevi nisu točno izračunati.\r\n"; inputGraphFile.close(); outputFile.close(); free(W); free(D); free(PI); return 0; }
7b1bc1686d0f5c53d58a6fe1a8443aae47ba35e4.cu
#include <iostream> #include <fstream> #include <stdlib.h> #include <cstring> #include <limits> // radi definiranja beskonačnosti #include <ctime> // radi mjerenja vremena izvršavanja #include <cmath> // radi "strop" funkcije using namespace std; /* Definiramo beskonačnost kao najveći mogući integer broj. */ #define infty std::numeric_limits<int>::max() void printMatrix (int* G, unsigned int dim) { cout << "\r\n"; for (int i = 0; i < dim*dim; i++) { if (G[i] < infty) { cout << G[i] << "\t"; } else { cout << "∞" << "\t"; } /* Ako je ispisao sve za jedan vrh, prijeđi na sljedeći u novi redak. */ if ((i+1)%dim == 0) { cout << "\r\n"; } } } /* Kernel za device koji implementira prvu fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase1(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; int* sh_D = &shrd[0]; int* sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int i = B*primaryBlockDim + threadIdx.y; int j = B*primaryBlockDim + threadIdx.x; int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && i < max(sub_dim, dim) && j < max(sub_dim, dim)) { sh_D[sub_i*sub_dim+sub_j] = D[i*dim+j]; sh_PI[sub_i*sub_dim+sub_j] = PI[i*dim+j]; } else { sh_D[sub_i*sub_dim+sub_j] = INT_MAX; sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); if (i < max(sub_dim, dim) && j < max(sub_dim, dim) && sh_D[sub_i*sub_dim+sub_k] < INT_MAX && sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (sh_D[sub_i*sub_dim+sub_j] > sh_D[sub_i*sub_dim+sub_k] + sh_D[sub_k*sub_dim+sub_j]) { sh_D[sub_i*sub_dim+sub_j] = sh_D[sub_i*sub_dim+sub_k] + sh_D[sub_k*sub_dim+sub_j]; sh_PI[sub_i*sub_dim+sub_j] = sh_PI[sub_k*sub_dim+sub_j]; } } } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && i < max(sub_dim, dim) && j < max(sub_dim, dim)) { D[i*dim+j] = sh_D[sub_i*sub_dim+sub_j]; PI[i*dim+j] = sh_PI[sub_i*sub_dim+sub_j]; } } /* Kernel za device koji implementira drugu fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase2(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; /* Sve varijable koje imaju prefiks "p_" pripadaju primarnom podbloku, sve koje imaju "c_" pripadaju trenutnom bloku. */ int* p_sh_D = &shrd[0]; int* p_sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int* c_sh_D = &shrd[2*primaryBlockDim*primaryBlockDim]; int* c_sh_PI = &shrd[3*primaryBlockDim*primaryBlockDim]; int p_i = B*primaryBlockDim + threadIdx.y; int p_j = B*primaryBlockDim + threadIdx.x; /* Ako je trenutni blok prije primarnog, skipCenterBlock biti će 0. Inače, ako je primarni ili neki nakon njega, biti će 1. */ int skipCenterBlock = min((blockIdx.x+1)/(B+1), 1); int c_i, c_j; /* Ako je y koordinata bloka u gridu jednaka 0, onda on pripada istom retku kao i primarni blok. Ako je y koordinata bloka u gridu jednaka 1, pripada istom stupcu kao i primarni blok. 
*/ if (blockIdx.y == 0) { c_i = p_i; c_j = (blockIdx.x+skipCenterBlock)*primaryBlockDim + threadIdx.x; } else { c_i = (blockIdx.x+skipCenterBlock)*primaryBlockDim + threadIdx.y; c_j = p_j; } int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); p_sh_D[sub_i*sub_dim+sub_j] = D[p_i*dim+p_j]; p_sh_PI[sub_i*sub_dim+sub_j] = PI[p_i*dim+p_j]; if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { c_sh_D[sub_i*sub_dim+sub_j] = D[c_i*dim+c_j]; c_sh_PI[sub_i*sub_dim+sub_j] = PI[c_i*dim+c_j]; } else { c_sh_D[sub_i*sub_dim+sub_j] = INT_MAX; c_sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); /* Pripada istom stupcu kao i primarni blok. */ if (blockIdx.y == 1) { if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && c_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && p_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > c_sh_D[sub_i*sub_dim+sub_k] + p_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = c_sh_D[sub_i*sub_dim+sub_k] + p_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = p_sh_PI[sub_k*sub_dim+sub_j]; } } } /* Pripada istom retku kao i primarni blok. */ if (blockIdx.y == 0) { if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && p_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && c_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > p_sh_D[sub_i*sub_dim+sub_k] + c_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = p_sh_D[sub_i*sub_dim+sub_k] + c_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = c_sh_PI[sub_k*sub_dim+sub_j]; } } } __syncthreads(); } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { D[c_i*dim+c_j] = c_sh_D[sub_i*sub_dim+sub_j]; PI[c_i*dim+c_j] = c_sh_PI[sub_i*sub_dim+sub_j]; } } /* Kernel za device koji implementira treću fazu blocked Floyd-Warshall algoritma. */ __global__ void FW_Cuda_Phase3(int B, int* D, int* PI, int dim, int primaryBlockDim) { extern __shared__ int shrd[]; /* Sve varijable koje imaju prefiks "p1_" pripadaju primarnom podbloku 1 izračunatom u fazi 2, sve koje imaju prefiks "p2_" pripadaju primarnom podbloku 2 izračunatom u fazi 2, a sve koje imaju "c_" pripadaju trenutnom bloku. */ int* p1_sh_D = &shrd[0]; int* p1_sh_PI = &shrd[primaryBlockDim*primaryBlockDim]; int* p2_sh_D = &shrd[2*primaryBlockDim*primaryBlockDim]; int* p2_sh_PI = &shrd[3*primaryBlockDim*primaryBlockDim]; int* c_sh_D = &shrd[4*primaryBlockDim*primaryBlockDim]; int* c_sh_PI = &shrd[5*primaryBlockDim*primaryBlockDim]; /* Ako je trenutni blok prije primarnog, skipCenterBlock biti će 0. Inače, ako je primarni ili neki nakon njega, biti će 1. U ovoj fazi to radimo po obje osi. 
*/ int skipCenterBlockX = min((blockIdx.x+1)/(B+1), 1); int skipCenterBlockY = min((blockIdx.y+1)/(B+1), 1); int c_i = (blockIdx.y+skipCenterBlockY)*primaryBlockDim + threadIdx.y; int c_j = (blockIdx.x+skipCenterBlockX)*primaryBlockDim + threadIdx.x; int p1_i = c_i; int p1_j = B*primaryBlockDim + threadIdx.x; int p2_i = B*primaryBlockDim + threadIdx.y; int p2_j = c_j; int sub_dim = primaryBlockDim; int sub_i = threadIdx.y; int sub_j = threadIdx.x; __syncthreads(); p1_sh_D[sub_i*sub_dim+sub_j] = D[p1_i*dim+p1_j]; p1_sh_PI[sub_i*sub_dim+sub_j] = PI[p1_i*dim+p1_j]; p2_sh_D[sub_i*sub_dim+sub_j] = D[p2_i*dim+p2_j]; p2_sh_PI[sub_i*sub_dim+sub_j] = PI[p2_i*dim+p2_j]; if (sub_i < sub_dim && sub_j < sub_dim && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < dim && c_j < dim) { c_sh_D[sub_i*sub_dim+sub_j] = D[c_i*dim+c_j]; c_sh_PI[sub_i*sub_dim+sub_j] = PI[c_i*dim+c_j]; } else { c_sh_D[sub_i*sub_dim+sub_j] = INT_MAX; c_sh_PI[sub_i*sub_dim+sub_j] = -1; } __syncthreads(); for (int sub_k = 0; sub_k < min(sub_dim, dim); sub_k++) { __syncthreads(); if (c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim) && p1_sh_D[sub_i*sub_dim+sub_k] < INT_MAX && p2_sh_D[sub_k*sub_dim+sub_j] < INT_MAX) { if (c_sh_D[sub_i*sub_dim+sub_j] > p1_sh_D[sub_i*sub_dim+sub_k] + p2_sh_D[sub_k*sub_dim+sub_j]) { c_sh_D[sub_i*sub_dim+sub_j] = p1_sh_D[sub_i*sub_dim+sub_k] + p2_sh_D[sub_k*sub_dim+sub_j]; c_sh_PI[sub_i*sub_dim+sub_j] = p2_sh_PI[sub_k*sub_dim+sub_j]; } } __syncthreads(); } __syncthreads(); if (sub_i < min(sub_dim, dim) && sub_j < min(sub_dim, dim) && sub_i*sub_dim+sub_j < sub_dim*sub_dim && c_i < max(sub_dim, dim) && c_j < max(sub_dim, dim)) { D[c_i*dim+c_j] = c_sh_D[sub_i*sub_dim+sub_j]; PI[c_i*dim+c_j] = c_sh_PI[sub_i*sub_dim+sub_j]; } } void Blocked_Floyd_Warshall_Cuda (int* W, int* D, int* PI, unsigned int dim) { unsigned int n = dim*dim; /* Error varijabla za handleanje CUDA errora. */ cudaError_t err = cudaSuccess; /* Alociranje device varijabli matrica D i PI. */ int* d_D = NULL; err = cudaMalloc((void**) &d_D, n*sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno alociranje matrice D (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int* d_PI = NULL; err = cudaMalloc((void**) &d_PI, n*sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno alociranje matrice PI (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* Kopiranje podataka iz host matrica u device. */ err = cudaMemcpy(d_D, D, n*sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice D iz hosta u device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_PI, PI, n*sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice PI iz hosta u device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* Pozivanje CUDA kernela. */ int blockDim = 25; /* Ukoliko je dimenzija bloka 32, imamo 1024 threadova po bloku. U blocked verziji Floyd-Warshall algoritma ovaj broj ovisi o tome koliko threadova se može pokrenuti u jednom bloku na nekoj grafičkoj kartici, ali i o tome koliko shared memorije po multiprocesoru ima dotična grafička kartica. Konkretno, za Teslu C2050/C2070 na kakvoj je rađen ovaj rad, maksimalno je 1024 threada po bloku, te je maksimalna shareana memorija multiprocesora 48kb, što bi značilo da možemo spremiti 4096 int elementa (jer u fazi 3 nam trebaju 3 bloka pa zapravo imamo 16kb po bloku za spremanje integera). 
Kako moramo gledati broj koji je manji (između 1024 zbog threadova/bloku i 4096 zbog shareane memorije), algoritam ćemo pokretati tako da je jedan blok veličine najviše 32*32, pri čemu se za veličinu bloka uzima prvi broj manji ili jednak broju 32 s kojim je dijeljiv broj vrhova grafa. */ int numberOfBlocks = ceil((float)dim/(float)blockDim); /* Broj (primarnih) blokova u blocked Floyd-Warshall algoritmu. */ cout << "Blocked Floyd-Warshall algoritam se pokreće sa " << numberOfBlocks << " primarna bloka po dijagonali.\r\n"; cout << "CUDA kerneli se pokreću kako slijedi: \r\n \t Faza 1: grid dimenzije 1x1 \r\n"; cout << "\t Faza 2: grid dimenzije " << numberOfBlocks-1 << "x2"; if (numberOfBlocks-1 == 0) cout << " (Faza 2 se neće izvršiti zbog dimenzija grida)"; cout << "\r\n"; cout << "\t Faza 3: grid dimenzije " << numberOfBlocks-1 << "x" << numberOfBlocks-1; if (numberOfBlocks-1 == 0) cout << " (Faza 3 se neće izvršiti zbog dimenzija grida)"; cout << "\r\n"; cout << "Svi blokovi se pokreću s " << blockDim*blockDim << " threada po bloku.\r\n"; /* Iteriranje po blokovima radimo na CPU, ostalo paraleliziramo. */ for (int B = 0; B < numberOfBlocks; B++) { /* Veličina shared memorije je blockDim*blockDim za matricu D i za matricu PI. */ FW_Cuda_Phase1<<<dim3(1, 1, 1), dim3(blockDim, blockDim, 1), 2*blockDim*blockDim*sizeof(int)>>> (B, d_D, d_PI, dim, blockDim); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase1 (error code %s)!\n", cudaGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } cudaThreadSynchronize(); /* Veličina shared memorije je blockDim*blockDim za primarnu matricu D, trenutnu matricu D, primarnu i trenutnu za matricu PI. */ if (numberOfBlocks-1 > 0) { FW_Cuda_Phase2<<<dim3(numberOfBlocks-1, 2, 1), dim3(blockDim, blockDim, 1), 4*blockDim*blockDim*sizeof(int)>>> (B, d_D, d_PI, dim, blockDim); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase2 (error code %s)!\n", cudaGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } } cudaThreadSynchronize(); /* Veličina shared memorije je blockDim*blockDim za trenutnu matricu D, dvije primarne matrice D izračunate u fazi 2, te za pripadne matrice PI. */ if (numberOfBlocks-1 > 0) { FW_Cuda_Phase3<<<dim3(numberOfBlocks-1, numberOfBlocks-1, 1), dim3(blockDim, blockDim, 1), 6*blockDim*blockDim*sizeof(int)>>> (B, d_D, d_PI, dim, blockDim); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno pokrenuta kernel metoda FW_Cuda_Phase3 (error code %s)!\n", cudaGetErrorString(err)); cout << "\r\n B = " << B << "\r\n"; exit(EXIT_FAILURE); } } /* Sinkronizacija threadova kako bi se završila B-ta iteracija, te kako bi se prešlo na (B+1). iteraciju. */ cudaThreadSynchronize(); } /* Kopiranje podataka iz device matrica u host. */ err = cudaMemcpy(D, d_D, n*sizeof(int), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice D iz devicea u host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(PI, d_PI, n*sizeof(int), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Neuspješno kopiranje matrice PI iz devicea u host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* Dealociranje device varijabli matrica D i PI. 
    for (int B = 0; B < numberOfBlocks; B++) {
        /* The shared-memory size is blockDim*blockDim for matrix D and for matrix PI. */
        FW_Cuda_Phase1<<<dim3(1, 1, 1), dim3(blockDim, blockDim, 1), 2*blockDim*blockDim*sizeof(int)>>>
            (B, d_D, d_PI, dim, blockDim);

        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to launch the kernel FW_Cuda_Phase1 (error code %s)!\n", cudaGetErrorString(err));
            cout << "\r\n B = " << B << "\r\n";
            exit(EXIT_FAILURE);
        }

        cudaThreadSynchronize();

        /* The shared-memory size is blockDim*blockDim for the primary matrix D, the current matrix D,
           and the primary and current matrix PI. */
        if (numberOfBlocks-1 > 0) {
            FW_Cuda_Phase2<<<dim3(numberOfBlocks-1, 2, 1), dim3(blockDim, blockDim, 1), 4*blockDim*blockDim*sizeof(int)>>>
                (B, d_D, d_PI, dim, blockDim);

            err = cudaGetLastError();
            if (err != cudaSuccess) {
                fprintf(stderr, "Failed to launch the kernel FW_Cuda_Phase2 (error code %s)!\n", cudaGetErrorString(err));
                cout << "\r\n B = " << B << "\r\n";
                exit(EXIT_FAILURE);
            }
        }

        cudaThreadSynchronize();

        /* The shared-memory size is blockDim*blockDim for the current matrix D, the two primary D
           matrices computed in phase 2, and the corresponding PI matrices. */
        if (numberOfBlocks-1 > 0) {
            FW_Cuda_Phase3<<<dim3(numberOfBlocks-1, numberOfBlocks-1, 1), dim3(blockDim, blockDim, 1), 6*blockDim*blockDim*sizeof(int)>>>
                (B, d_D, d_PI, dim, blockDim);

            err = cudaGetLastError();
            if (err != cudaSuccess) {
                fprintf(stderr, "Failed to launch the kernel FW_Cuda_Phase3 (error code %s)!\n", cudaGetErrorString(err));
                cout << "\r\n B = " << B << "\r\n";
                exit(EXIT_FAILURE);
            }
        }

        /* Synchronize the threads so that iteration B finishes completely before iteration B+1 starts. */
        cudaThreadSynchronize();
    }

    /* Copy the data from the device matrices back to the host. */
    err = cudaMemcpy(D, d_D, n*sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy matrix D from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(PI, d_PI, n*sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy matrix PI from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Free the device copies of the matrices D and PI. */
    err = cudaFree(d_D);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free matrix D (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_PI);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free matrix PI (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Reset the CUDA device and finish the CUDA Floyd-Warshall run. */
    err = cudaDeviceReset();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to reset the device (finishing the CUDA FW run, preparing for the next one)! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Reconstructs the weight of the shortest path for a given pair of vertices using the predecessor
   matrix PI and the matrix of initial weights W. */
int getPath (int* W, int* PI, int i, int j, unsigned int dim)
{
    if (i == j) {
        return 0;
    } else if (PI[i*dim+j] == -1) {
        return infty;
    } else {
        int recursivePath = getPath(W, PI, i, PI[i*dim+j], dim);
        if (recursivePath < infty) {
            return recursivePath + W[PI[i*dim+j]*dim+j];
        } else {
            return infty;
        }
    }
}

/* For every pair of vertices runs getPath, which reconstructs the weight of the shortest path
   between them using the predecessor matrix PI, and compares that weight with the one obtained
   for the same pair in the shortest-path matrix D. */
bool checkSolutionCorrectness (int* W, int* D, int* PI, unsigned int dim)
{
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            if (getPath(W, PI, i, j, dim) != D[i*dim+j]) {
                return false;
            }
        }
    }
    return true;
}
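/* Illustrative sketch (not part of the original program): getPath above reconstructs only the weight
   of a shortest path. Under the same predecessor convention (PI[i*dim+j] is the predecessor of j on
   the shortest path from i, or -1 if no path exists), the vertex sequence itself can be printed
   recursively as follows. */
void printPath (int* PI, int i, int j, unsigned int dim)
{
    if (i == j) {
        cout << i;
    } else if (PI[i*dim+j] == -1) {
        cout << "no path from " << i << " to " << j;
    } else {
        /* First print the path from i to the predecessor of j, then append j itself. */
        printPath(PI, i, PI[i*dim+j], dim);
        cout << " -> " << j;
    }
}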
int main()
{
    /* V - number of vertices
       E - number of edges
       u - first vertex while reading the graph from the file
       v - second vertex while reading the graph from the file
       w - weight of the edge between u and v while reading the graph from the file */
    unsigned int V, E;
    int u, v, w;

    ifstream inputGraphFile;
    inputGraphFile.open("graphFile.txt");

    ofstream outputFile;
    outputFile.open("output_cuda_blocked.txt");

    inputGraphFile >> V >> E;
    cout << "V = " << V << ", E = " << E << "\r\n";

    unsigned int n = V*V;

    /* Initialize the matrices in memory. */
    int* W = (int*)malloc(n*sizeof(int));
    int* D = (int*)malloc(n*sizeof(int));
    int* PI = (int*)malloc(n*sizeof(int));

    /* Set the initial values of the predecessor matrix PI(0), the weight matrix W and the
       shortest-path matrix D(0). */
    fill_n(W, n, infty);
    fill_n(PI, n, -1);

    for (int i = 0; i < E; i++) {
        inputGraphFile >> u >> v >> w;
        //cout << u << " <-- " << w << " --> " << v << "\r\n";
        W[u*V+v] = w;
        if (u != v) {
            PI[u*V+v] = u;
        }
    }

    for (int i = 0; i < V; i++) {
        W[i*V+i] = 0;
    }

    /* D(0) = W at the start. */
    memcpy (D, W, n*sizeof(int));

    // printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V);

    /* Start measuring the execution time of the Floyd-Warshall algorithm. */
    clock_t begin = clock();

    /* Run the blocked Floyd-Warshall CUDA algorithm on the loaded graph. */
    Blocked_Floyd_Warshall_Cuda(W, D, PI, V);

    /* Stop measuring the execution time of the Floyd-Warshall algorithm. */
    clock_t end = clock();
    double elapsedTime = double(end - begin) / CLOCKS_PER_SEC;

    //printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V);

    /* Write the results to the output file. */
    outputFile << "|V| = " << V << ", |E| = " << E << "\r\n\r\n";
    for (int i = 0; i < n; i++) {
        if (i%V==0) outputFile << "\r\n";
        if (D[i] < infty) outputFile << D[i] << "\t";
        else outputFile << "∞" << "\t";
    }

    outputFile << "\r\n\r\n";

    for (int i = 0; i < n; i++) {
        if (i%V==0) outputFile << "\r\n";
        outputFile << PI[i] << "\t";
    }

    cout << "Execution time of the blocked Floyd-Warshall algorithm: " << elapsedTime << "s.\r\n";

    if (checkSolutionCorrectness(W, D, PI, V) == true) cout << "All shortest paths were computed correctly!\r\n";
    else cout << "The shortest paths were not computed correctly.\r\n";

    inputGraphFile.close();
    outputFile.close();

    free(W);
    free(D);
    free(PI);

    return 0;
}
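/* Example (added for illustration, not part of the original program): main() expects graphFile.txt
   in the working directory, with the number of vertices and edges on the first line followed by one
   "u v w" triple per directed edge, e.g. a 4-vertex, 5-edge graph:

       4 5
       0 1 3
       1 2 4
       0 2 10
       2 3 1
       3 0 2

   Assuming this source is saved as fw_cuda_blocked.cu (the file name is an assumption), it can be
   built with the standard CUDA compiler, e.g. `nvcc -o fw_cuda_blocked fw_cuda_blocked.cu`, and the
   shortest-path matrix D and the predecessor matrix PI are written to output_cuda_blocked.txt. */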